From 566ac279ab35c8d57ada3d6f0c331ccb02c11ae7 Mon Sep 17 00:00:00 2001 From: Sam Weaver Date: Tue, 3 May 2022 15:41:17 -0400 Subject: [PATCH 001/438] d/aws_ecr_image: Add image URI attribute (#13671) --- internal/service/ecr/image_data_source.go | 29 +++++++++++++++++++ .../service/ecr/image_data_source_test.go | 2 ++ website/docs/d/ecr_image.html.markdown | 1 + 3 files changed, 32 insertions(+) diff --git a/internal/service/ecr/image_data_source.go b/internal/service/ecr/image_data_source.go index 38f0abd6397..15e751f33dd 100644 --- a/internal/service/ecr/image_data_source.go +++ b/internal/service/ecr/image_data_source.go @@ -47,6 +47,10 @@ func DataSourceImage() *schema.Resource { Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + "image_uri": { + Type: schema.TypeString, + Computed: true, + }, }, } } @@ -98,11 +102,36 @@ func dataSourceImageRead(d *schema.ResourceData, meta interface{}) error { image := imageDetails[0] + params2 := &ecr.DescribeRepositoriesInput{ + RepositoryNames: []*string{image.RepositoryName}, + RegistryId: image.RegistryId, + } + + var repositoryDetails []*ecr.Repository + log.Printf("[DEBUG] Reading ECR Repositories: %s", params2) + err2 := conn.DescribeRepositoriesPages(params2, func(page *ecr.DescribeRepositoriesOutput, lastPage bool) bool { + repositoryDetails = append(repositoryDetails, page.Repositories...) 
+ return true + }) + if err2 != nil { + return fmt.Errorf("Error describing ECR repositories: %w", err2) + } + + if len(repositoryDetails) == 0 { + return fmt.Errorf("No repository found") + } + if len(repositoryDetails) > 1 { + return fmt.Errorf("More than one repository found for image") + } + + repository := repositoryDetails[0] + d.SetId(aws.StringValue(image.ImageDigest)) d.Set("registry_id", image.RegistryId) d.Set("image_digest", image.ImageDigest) d.Set("image_pushed_at", image.ImagePushedAt.Unix()) d.Set("image_size_in_bytes", image.ImageSizeInBytes) + d.Set("image_uri", aws.String(aws.StringValue(repository.RepositoryUri)+"@"+aws.StringValue(image.ImageDigest))) if err := d.Set("image_tags", aws.StringValueSlice(image.ImageTags)); err != nil { return fmt.Errorf("failed to set image_tags: %w", err) } diff --git a/internal/service/ecr/image_data_source_test.go b/internal/service/ecr/image_data_source_test.go index c6f373f8672..31185df6661 100644 --- a/internal/service/ecr/image_data_source_test.go +++ b/internal/service/ecr/image_data_source_test.go @@ -27,9 +27,11 @@ func TestAccECRImageDataSource_ecrImage(t *testing.T) { resource.TestCheckResourceAttrSet(resourceByTag, "image_digest"), resource.TestCheckResourceAttrSet(resourceByTag, "image_pushed_at"), resource.TestCheckResourceAttrSet(resourceByTag, "image_size_in_bytes"), + resource.TestCheckResourceAttrSet(resourceByTag, "image_uri"), testCheckTagInImageTags(resourceByTag, tag), resource.TestCheckResourceAttrSet(resourceByDigest, "image_pushed_at"), resource.TestCheckResourceAttrSet(resourceByDigest, "image_size_in_bytes"), + resource.TestCheckResourceAttrSet(resourceByDigest, "image_uri"), testCheckTagInImageTags(resourceByDigest, tag), ), }, diff --git a/website/docs/d/ecr_image.html.markdown b/website/docs/d/ecr_image.html.markdown index 65d6242bc57..b4918422290 100644 --- a/website/docs/d/ecr_image.html.markdown +++ b/website/docs/d/ecr_image.html.markdown @@ -36,3 +36,4 @@ In addition to all
arguments above, the following attributes are exported: * `image_pushed_at` - The date and time, expressed as a unix timestamp, at which the current image was pushed to the repository. * `image_size_in_bytes` - The size, in bytes, of the image in the repository. * `image_tags` - The list of tags associated with this image. +* `image_uri` - The URI for the specific image version specified by `image_tag` or `image_digest`. From 0a3ba8daea5a78e83dbc62e741d6f9fa4072fd62 Mon Sep 17 00:00:00 2001 From: Sam Weaver Date: Tue, 3 May 2022 15:53:09 -0400 Subject: [PATCH 002/438] Add changelog entry for PR 24526. --- .changelog/24526.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/24526.txt diff --git a/.changelog/24526.txt b/.changelog/24526.txt new file mode 100644 index 00000000000..7b392ba8fb6 --- /dev/null +++ b/.changelog/24526.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +data-source/aws_ecr_image: Add image_uri attribute +``` \ No newline at end of file From b68ed6872b86c6eca1c9bd8f9b615de887f1ec75 Mon Sep 17 00:00:00 2001 From: lvthillo Date: Fri, 17 Feb 2023 10:16:05 +0100 Subject: [PATCH 003/438] Add new use_new_mapping_type to dms ES. --- internal/service/dms/endpoint.go | 14 ++++++- internal/service/dms/endpoint_test.go | 50 +++++++++++++++++++++++ website/docs/r/dms_endpoint.html.markdown | 1 + 3 files changed, 64 insertions(+), 1 deletion(-) diff --git a/internal/service/dms/endpoint.go b/internal/service/dms/endpoint.go index 8dcad287f23..edb03b0e831 100644 --- a/internal/service/dms/endpoint.go +++ b/internal/service/dms/endpoint.go @@ -91,6 +91,14 @@ func ResourceEndpoint() *schema.Resource { // InvalidParameterCombinationException: OpenSearch endpoint cant be modified. ForceNew: true, }, + "use_new_mapping_type": { + Type: schema.TypeBool, + Optional: true, + Default: false, + // API returns this error with ModifyEndpoint: + // InvalidParameterCombinationException: OpenSearch endpoint cant be modified. 
+ ForceNew: true, + }, }, }, }, @@ -771,6 +779,7 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in EndpointUri: aws.String(d.Get("elasticsearch_settings.0.endpoint_uri").(string)), ErrorRetryDuration: aws.Int64(int64(d.Get("elasticsearch_settings.0.error_retry_duration").(int))), FullLoadErrorPercentage: aws.Int64(int64(d.Get("elasticsearch_settings.0.full_load_error_percentage").(int))), + UseNewMappingType: aws.Bool(d.Get("elasticsearch_settings.0.use_new_mapping_type").(bool)), } case engineNameKafka: input.KafkaSettings = expandKafkaSettings(d.Get("kafka_settings").([]interface{})[0].(map[string]interface{})) @@ -1063,12 +1072,14 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in "elasticsearch_settings.0.endpoint_uri", "elasticsearch_settings.0.error_retry_duration", "elasticsearch_settings.0.full_load_error_percentage", - "elasticsearch_settings.0.service_access_role_arn") { + "elasticsearch_settings.0.service_access_role_arn", + "elasticsearch_settings.0.use_new_mapping_type") { input.ElasticsearchSettings = &dms.ElasticsearchSettings{ ServiceAccessRoleArn: aws.String(d.Get("elasticsearch_settings.0.service_access_role_arn").(string)), EndpointUri: aws.String(d.Get("elasticsearch_settings.0.endpoint_uri").(string)), ErrorRetryDuration: aws.Int64(int64(d.Get("elasticsearch_settings.0.error_retry_duration").(int))), FullLoadErrorPercentage: aws.Int64(int64(d.Get("elasticsearch_settings.0.full_load_error_percentage").(int))), + UseNewMappingType: aws.Bool(d.Get("elasticsearch_settings.0.use_new_mapping_type").(bool)), } input.EngineName = aws.String(engineName) } @@ -1563,6 +1574,7 @@ func flattenOpenSearchSettings(settings *dms.ElasticsearchSettings) []map[string "error_retry_duration": aws.Int64Value(settings.ErrorRetryDuration), "full_load_error_percentage": aws.Int64Value(settings.FullLoadErrorPercentage), "service_access_role_arn": aws.StringValue(settings.ServiceAccessRoleArn), + 
"use_new_mapping_type": aws.BoolValue(settings.UseNewMappingType), } return []map[string]interface{}{m} diff --git a/internal/service/dms/endpoint_test.go b/internal/service/dms/endpoint_test.go index 068f6922b28..677f911b5a9 100644 --- a/internal/service/dms/endpoint_test.go +++ b/internal/service/dms/endpoint_test.go @@ -469,6 +469,7 @@ func TestAccDMSEndpoint_OpenSearch_basic(t *testing.T) { testAccCheckResourceAttrRegionalHostname(resourceName, "elasticsearch_settings.0.endpoint_uri", "es", "search-estest"), resource.TestCheckResourceAttr(resourceName, "elasticsearch_settings.0.full_load_error_percentage", "10"), resource.TestCheckResourceAttr(resourceName, "elasticsearch_settings.0.error_retry_duration", "300"), + resource.TestCheckResourceAttr(resourceName, "elasticsearch_settings.0.use_new_mapping_type", "false"), ), }, { @@ -552,6 +553,35 @@ func TestAccDMSEndpoint_OpenSearch_errorRetryDuration(t *testing.T) { }) } +func TestAccDMSEndpoint_OpenSearch_UseNewMappingType(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + ErrorCheck: acctest.ErrorCheck(t, dms.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckEndpointDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccEndpointConfig_openSearchUseNewMappingType(rName, true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEndpointExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "elasticsearch_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "elasticsearch_settings.0.use_new_mapping_type", "true"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"password"}, + }, + }, + }) +} + func 
TestAccDMSEndpoint_OpenSearch_fullLoadErrorPercentage(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_dms_endpoint.test" @@ -2830,6 +2860,26 @@ resource "aws_dms_endpoint" "test" { `, rName, errorRetryDuration)) } +func testAccEndpointConfig_openSearchUseNewMappingType(rName string, useNewMappingType bool) string { + return acctest.ConfigCompose( + testAccEndpointConfig_openSearchBase(rName), + fmt.Sprintf(` +resource "aws_dms_endpoint" "test" { + endpoint_id = %[1]q + endpoint_type = "target" + engine_name = "elasticsearch" + + elasticsearch_settings { + endpoint_uri = "search-estest.${data.aws_region.current.name}.es.${data.aws_partition.current.dns_suffix}" + use_new_mapping_type = %[2]t + service_access_role_arn = aws_iam_role.test.arn + } + + depends_on = [aws_iam_role_policy.test] +} +`, rName, useNewMappingType)) +} + func testAccEndpointConfig_openSearchFullLoadErrorPercentage(rName string, fullLoadErrorPercentage int) string { return acctest.ConfigCompose( testAccEndpointConfig_openSearchBase(rName), diff --git a/website/docs/r/dms_endpoint.html.markdown b/website/docs/r/dms_endpoint.html.markdown index cf4c3272d73..225bc241755 100644 --- a/website/docs/r/dms_endpoint.html.markdown +++ b/website/docs/r/dms_endpoint.html.markdown @@ -84,6 +84,7 @@ The following arguments are optional: * `error_retry_duration` - (Optional) Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is `300`. * `full_load_error_percentage` - (Optional) Maximum percentage of records that can fail to be written before a full load operation stops. Default is `10`. * `service_access_role_arn` - (Required) ARN of the IAM Role with permissions to write to the OpenSearch cluster. +* `use_new_mapping_type` - (Optional) Enable to migrate documents using the document type `_doc`. OpenSearch and Elasticsearch clusters only support the `_doc` document type in versions 7.x and later. The default value is `false`.
### kafka_settings From 982a7207e35e4acfb75886f13127ed210dc85a01 Mon Sep 17 00:00:00 2001 From: Jakob Date: Wed, 12 Apr 2023 20:13:07 -0500 Subject: [PATCH 004/438] allows move to new instance arn --- internal/service/dms/consts.go | 1 + internal/service/dms/replication_task.go | 33 ++++++++++++++++++++++-- internal/service/dms/wait.go | 17 ++++++++++++ 3 files changed, 49 insertions(+), 2 deletions(-) diff --git a/internal/service/dms/consts.go b/internal/service/dms/consts.go index ab045b634c7..cabd7d01828 100644 --- a/internal/service/dms/consts.go +++ b/internal/service/dms/consts.go @@ -7,6 +7,7 @@ const ( replicationTaskStatusDeleting = "deleting" replicationTaskStatusFailed = "failed" replicationTaskStatusModifying = "modifying" + replicationTaskStatusMoving = "moving" replicationTaskStatusReady = "ready" replicationTaskStatusStopped = "stopped" replicationTaskStatusStopping = "stopping" diff --git a/internal/service/dms/replication_task.go b/internal/service/dms/replication_task.go index d5864702854..9558db6923f 100644 --- a/internal/service/dms/replication_task.go +++ b/internal/service/dms/replication_task.go @@ -60,7 +60,6 @@ func ResourceReplicationTask() *schema.Resource { "replication_instance_arn": { Type: schema.TypeString, Required: true, - ForceNew: true, ValidateFunc: verify.ValidARN, }, "replication_task_arn": { @@ -212,7 +211,7 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, var diags diag.Diagnostics conn := meta.(*conns.AWSClient).DMSConn() - if d.HasChangesExcept("tags", "tags_all", "start_replication_task") { + if d.HasChangesExcept("tags", "tags_all", "start_replication_task", "replication_instance_arn") { input := &dms.ModifyReplicationTaskInput{ ReplicationTaskArn: aws.String(d.Get("replication_task_arn").(string)), MigrationType: aws.String(d.Get("migration_type").(string)), @@ -261,6 +260,36 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, } } + if 
d.HasChange("replication_instance_arn") { + input := &dms.MoveReplicationTaskInput{ + ReplicationTaskArn: aws.String(d.Get("replication_task_arn").(string)), + TargetReplicationInstanceArn: aws.String(d.Get("replication_instance_arn").(string)), + } + status := d.Get("status").(string) + if status == replicationTaskStatusRunning { + log.Println("[DEBUG] stopping DMS replication task:", input) + if err := stopReplicationTask(ctx, d.Id(), conn); err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + } + log.Println("[DEBUG] moving DMS replication task:", input) + _, err := conn.MoveReplicationTaskWithContext(ctx, input) + if err != nil { + return sdkdiag.AppendErrorf(diags, "moving DMS Replication Task (%s): %s", d.Id(), err) + } + + if err := waitReplicationTaskMoved(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for DMS Replication Task (%s) update: %s", d.Id(), err) + } + + if d.Get("start_replication_task").(bool) { + err := startReplicationTask(ctx, d.Id(), conn) + if err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + } + } + if d.HasChanges("start_replication_task") { status := d.Get("status").(string) if d.Get("start_replication_task").(bool) { diff --git a/internal/service/dms/wait.go b/internal/service/dms/wait.go index 05b1b739ac8..c0736382b37 100644 --- a/internal/service/dms/wait.go +++ b/internal/service/dms/wait.go @@ -11,6 +11,7 @@ import ( const ( propagationTimeout = 2 * time.Minute replicationTaskRunningTimeout = 5 * time.Minute + moveTaskTimeout = 10 * time.Minute ) func waitEndpointDeleted(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) error { @@ -58,6 +59,22 @@ func waitReplicationTaskModified(ctx context.Context, conn *dms.DatabaseMigratio return err } +func waitReplicationTaskMoved(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) error { + stateConf := &retry.StateChangeConf{ + 
Pending: []string{replicationTaskStatusModifying, replicationTaskStatusMoving}, + Target: []string{replicationTaskStatusReady, replicationTaskStatusStopped, replicationTaskStatusFailed}, + Refresh: statusReplicationTask(ctx, conn, id), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 30 * time.Second, // Wait 30 secs before starting + } + + // Wait, catching any errors + _, err := stateConf.WaitForStateContext(ctx) + + return err +} + func waitReplicationTaskReady(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) error { stateConf := &retry.StateChangeConf{ Pending: []string{replicationTaskStatusCreating}, From c6b05aa19744a21a60d019bd0bd389f538613ab1 Mon Sep 17 00:00:00 2001 From: Jakob Date: Thu, 13 Apr 2023 21:14:18 -0500 Subject: [PATCH 005/438] fixes acc tests --- internal/service/dms/replication_task_test.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/internal/service/dms/replication_task_test.go b/internal/service/dms/replication_task_test.go index 04f182fe7d5..37eb60d2782 100644 --- a/internal/service/dms/replication_task_test.go +++ b/internal/service/dms/replication_task_test.go @@ -410,7 +410,7 @@ resource "aws_dms_replication_task" "test" { migration_type = "full-load" replication_instance_arn = aws_dms_replication_instance.test.replication_instance_arn replication_task_id = %[1]q - replication_task_settings = 
"{\"BeforeImageSettings\":null,\"FailTaskWhenCleanTaskResourceFailed\":false,\"ChangeProcessingDdlHandlingPolicy\":{\"HandleSourceTableAltered\":true,\"HandleSourceTableDropped\":true,\"HandleSourceTableTruncated\":true},\"ChangeProcessingTuning\":{\"BatchApplyMemoryLimit\":500,\"BatchApplyPreserveTransaction\":true,\"BatchApplyTimeoutMax\":30,\"BatchApplyTimeoutMin\":1,\"BatchSplitSize\":0,\"CommitTimeout\":1,\"MemoryKeepTime\":60,\"MemoryLimitTotal\":1024,\"MinTransactionSize\":1000,\"StatementCacheSize\":50},\"CharacterSetSettings\":null,\"ControlTablesSettings\":{\"ControlSchema\":\"\",\"FullLoadExceptionTableEnabled\":false,\"HistoryTableEnabled\":false,\"HistoryTimeslotInMinutes\":5,\"StatusTableEnabled\":false,\"SuspendedTablesTableEnabled\":false},\"ErrorBehavior\":{\"ApplyErrorDeletePolicy\":\"IGNORE_RECORD\",\"ApplyErrorEscalationCount\":0,\"ApplyErrorEscalationPolicy\":\"LOG_ERROR\",\"ApplyErrorFailOnTruncationDdl\":false,\"ApplyErrorInsertPolicy\":\"LOG_ERROR\",\"ApplyErrorUpdatePolicy\":\"LOG_ERROR\",\"DataErrorEscalationCount\":0,\"DataErrorEscalationPolicy\":\"SUSPEND_TABLE\",\"DataErrorPolicy\":\"LOG_ERROR\",\"DataTruncationErrorPolicy\":\"LOG_ERROR\",\"EventErrorPolicy\":\"IGNORE\",\"FailOnNoTablesCaptured\":false,\"FailOnTransactionConsistencyBreached\":false,\"FullLoadIgnoreConflicts\":true,\"RecoverableErrorCount\":-1,\"RecoverableErrorInterval\":5,\"RecoverableErrorStopRetryAfterThrottlingMax\":false,\"RecoverableErrorThrottling\":true,\"RecoverableErrorThrottlingMax\":1800,\"TableErrorEscalationCount\":0,\"TableErrorEscalationPolicy\":\"STOP_TASK\",\"TableErrorPolicy\":\"SUSPEND_TABLE\"},\"FullLoadSettings\":{\"CommitRate\":10000,\"CreatePkAfterFullLoad\":false,\"MaxFullLoadSubTasks\":8,\"StopTaskCachedChangesApplied\":false,\"StopTaskCachedChangesNotApplied\":false,\"TargetTablePrepMode\":\"DROP_AND_CREATE\",\"TransactionConsistencyTimeout\":600},\"Logging\":{\"EnableLogging\":false,\"LogComponents\":[{\"Id\":\"TRANSFORMATION\",\"Severity\":\"
LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_UNLOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"IO\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_LOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"PERFORMANCE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_CAPTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SORTER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"REST_SERVER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"VALIDATOR_EXT\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_APPLY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TASK_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TABLES_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"METADATA_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_FACTORY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMON\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"ADDONS\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"DATA_STRUCTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMUNICATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_TRANSFER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"}]},\"LoopbackPreventionSettings\":null,\"PostProcessingRules\":null,\"StreamBufferSettings\":{\"CtrlStreamBufferSizeInMB\":5,\"StreamBufferCount\":3,\"StreamBufferSizeInMB\":8},\"TargetMetadata\":{\"BatchApplyEnabled\":false,\"FullLobMode\":false,\"InlineLobMaxSize\":0,\"LimitedSizeLobMode\":true,\"LoadMaxFileSize\":0,\"LobChunkSize\":0,\"LobMaxSize\":32,\"ParallelApplyBufferSize\":0,\"ParallelApplyQueuesPerThread\":0,\"ParallelApplyThreads\":0,\"ParallelLoadBufferSize\":0,\"ParallelLoadQueuesPerThread\":0,\"ParallelLoadThreads\":0,\"SupportLobs\":true,\"TargetSchema\":\"\",\"TaskRecoveryTableEnabled\":false},\"TTSettings\":{\"EnableTT\":false,\"TTRecordSettings\":null,\"TTS3Settings\":null}}" + replication_task_settings = 
"{\"BeforeImageSettings\":null,\"FailTaskWhenCleanTaskResourceFailed\":false,\"ChangeProcessingDdlHandlingPolicy\":{\"HandleSourceTableAltered\":true,\"HandleSourceTableDropped\":true,\"HandleSourceTableTruncated\":true},\"ChangeProcessingTuning\":{\"BatchApplyMemoryLimit\":500,\"BatchApplyPreserveTransaction\":true,\"BatchApplyTimeoutMax\":30,\"BatchApplyTimeoutMin\":1,\"BatchSplitSize\":0,\"CommitTimeout\":1,\"MemoryKeepTime\":60,\"MemoryLimitTotal\":1024,\"MinTransactionSize\":1000,\"StatementCacheSize\":50},\"CharacterSetSettings\":null,\"ControlTablesSettings\":{\"ControlSchema\":\"\",\"FullLoadExceptionTableEnabled\":false,\"HistoryTableEnabled\":false,\"HistoryTimeslotInMinutes\":5,\"StatusTableEnabled\":false,\"SuspendedTablesTableEnabled\":false},\"ErrorBehavior\":{\"ApplyErrorDeletePolicy\":\"IGNORE_RECORD\",\"ApplyErrorEscalationCount\":0,\"ApplyErrorEscalationPolicy\":\"LOG_ERROR\",\"ApplyErrorFailOnTruncationDdl\":false,\"ApplyErrorInsertPolicy\":\"LOG_ERROR\",\"ApplyErrorUpdatePolicy\":\"LOG_ERROR\",\"DataErrorEscalationCount\":0,\"DataErrorEscalationPolicy\":\"SUSPEND_TABLE\",\"DataErrorPolicy\":\"LOG_ERROR\",\"DataTruncationErrorPolicy\":\"LOG_ERROR\",\"EventErrorPolicy\":\"IGNORE\",\"FailOnNoTablesCaptured\":false,\"FailOnTransactionConsistencyBreached\":false,\"FullLoadIgnoreConflicts\":true,\"RecoverableErrorCount\":-1,\"RecoverableErrorInterval\":5,\"RecoverableErrorStopRetryAfterThrottlingMax\":false,\"RecoverableErrorThrottling\":true,\"RecoverableErrorThrottlingMax\":1800,\"TableErrorEscalationCount\":0,\"TableErrorEscalationPolicy\":\"STOP_TASK\",\"TableErrorPolicy\":\"SUSPEND_TABLE\"},\"FullLoadSettings\":{\"CommitRate\":10000,\"CreatePkAfterFullLoad\":false,\"MaxFullLoadSubTasks\":8,\"StopTaskCachedChangesApplied\":false,\"StopTaskCachedChangesNotApplied\":false,\"TargetTablePrepMode\":\"DROP_AND_CREATE\",\"TransactionConsistencyTimeout\":600},\"Logging\":{\"EnableLogging\":false,\"EnableLogContext\":false,\"LogComponents\":[{\"Id\":\"TRANS
FORMATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_UNLOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"IO\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_LOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"PERFORMANCE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_CAPTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SORTER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"REST_SERVER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"VALIDATOR_EXT\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_APPLY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TASK_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TABLES_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"METADATA_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_FACTORY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMON\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"ADDONS\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"DATA_STRUCTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMUNICATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_TRANSFER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"}]},\"LoopbackPreventionSettings\":null,\"PostProcessingRules\":null,\"StreamBufferSettings\":{\"CtrlStreamBufferSizeInMB\":5,\"StreamBufferCount\":3,\"StreamBufferSizeInMB\":8},\"TargetMetadata\":{\"BatchApplyEnabled\":false,\"FullLobMode\":false,\"InlineLobMaxSize\":0,\"LimitedSizeLobMode\":true,\"LoadMaxFileSize\":0,\"LobChunkSize\":0,\"LobMaxSize\":32,\"ParallelApplyBufferSize\":0,\"ParallelApplyQueuesPerThread\":0,\"ParallelApplyThreads\":0,\"ParallelLoadBufferSize\":0,\"ParallelLoadQueuesPerThread\":0,\"ParallelLoadThreads\":0,\"SupportLobs\":true,\"TargetSchema\":\"\",\"TaskRecoveryTableEnabled\":false},\"TTSettings\":{\"EnableTT\":false,\"TTRecordSettings\":null,\"TTS3Settings\":null}}" source_endpoint_arn = 
aws_dms_endpoint.source.endpoint_arn table_mappings = "{\"rules\":[{\"rule-type\":\"selection\",\"rule-id\":\"1\",\"rule-name\":\"1\",\"object-locator\":{\"schema-name\":\"%%\",\"table-name\":\"%%\"},\"rule-action\":\"include\"}]}" @@ -432,7 +432,7 @@ resource "aws_dms_replication_task" "test" { migration_type = %[2]q replication_instance_arn = aws_dms_replication_instance.test.replication_instance_arn replication_task_id = %[1]q - replication_task_settings = "{\"BeforeImageSettings\":null,\"FailTaskWhenCleanTaskResourceFailed\":false,\"ChangeProcessingDdlHandlingPolicy\":{\"HandleSourceTableAltered\":true,\"HandleSourceTableDropped\":true,\"HandleSourceTableTruncated\":true},\"ChangeProcessingTuning\":{\"BatchApplyMemoryLimit\":500,\"BatchApplyPreserveTransaction\":true,\"BatchApplyTimeoutMax\":30,\"BatchApplyTimeoutMin\":1,\"BatchSplitSize\":0,\"CommitTimeout\":1,\"MemoryKeepTime\":60,\"MemoryLimitTotal\":%[3]d,\"MinTransactionSize\":1000,\"StatementCacheSize\":50},\"CharacterSetSettings\":null,\"ControlTablesSettings\":{\"ControlSchema\":\"\",\"FullLoadExceptionTableEnabled\":false,\"HistoryTableEnabled\":false,\"HistoryTimeslotInMinutes\":5,\"StatusTableEnabled\":false,\"SuspendedTablesTableEnabled\":false},\"ErrorBehavior\":{\"ApplyErrorDeletePolicy\":\"IGNORE_RECORD\",\"ApplyErrorEscalationCount\":0,\"ApplyErrorEscalationPolicy\":\"LOG_ERROR\",\"ApplyErrorFailOnTruncationDdl\":false,\"ApplyErrorInsertPolicy\":\"LOG_ERROR\",\"ApplyErrorUpdatePolicy\":\"LOG_ERROR\",\"DataErrorEscalationCount\":0,\"DataErrorEscalationPolicy\":\"SUSPEND_TABLE\",\"DataErrorPolicy\":\"LOG_ERROR\",\"DataTruncationErrorPolicy\":\"LOG_ERROR\",\"EventErrorPolicy\":\"IGNORE\",\"FailOnNoTablesCaptured\":false,\"FailOnTransactionConsistencyBreached\":false,\"FullLoadIgnoreConflicts\":true,\"RecoverableErrorCount\":-1,\"RecoverableErrorInterval\":5,\"RecoverableErrorStopRetryAfterThrottlingMax\":false,\"RecoverableErrorThrottling\":true,\"RecoverableErrorThrottlingMax\":1800,\"TableErrorEsc
alationCount\":0,\"TableErrorEscalationPolicy\":\"STOP_TASK\",\"TableErrorPolicy\":\"SUSPEND_TABLE\"},\"FullLoadSettings\":{\"CommitRate\":10000,\"CreatePkAfterFullLoad\":false,\"MaxFullLoadSubTasks\":8,\"StopTaskCachedChangesApplied\":false,\"StopTaskCachedChangesNotApplied\":false,\"TargetTablePrepMode\":\"DROP_AND_CREATE\",\"TransactionConsistencyTimeout\":600},\"Logging\":{\"EnableLogging\":false,\"LogComponents\":[{\"Id\":\"TRANSFORMATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_UNLOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"IO\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_LOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"PERFORMANCE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_CAPTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SORTER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"REST_SERVER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"VALIDATOR_EXT\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_APPLY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TASK_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TABLES_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"METADATA_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_FACTORY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMON\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"ADDONS\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"DATA_STRUCTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMUNICATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_TRANSFER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"}]},\"LoopbackPreventionSettings\":null,\"PostProcessingRules\":null,\"StreamBufferSettings\":{\"CtrlStreamBufferSizeInMB\":5,\"StreamBufferCount\":3,\"StreamBufferSizeInMB\":8},\"TargetMetadata\":{\"BatchApplyEnabled\":false,\"FullLobMode\":false,\"InlineLobMaxSize\":0,\"LimitedSizeLobMode\":true,\"LoadMaxFileSize\":0,
\"LobChunkSize\":0,\"LobMaxSize\":32,\"ParallelApplyBufferSize\":0,\"ParallelApplyQueuesPerThread\":0,\"ParallelApplyThreads\":0,\"ParallelLoadBufferSize\":0,\"ParallelLoadQueuesPerThread\":0,\"ParallelLoadThreads\":0,\"SupportLobs\":true,\"TargetSchema\":\"\",\"TaskRecoveryTableEnabled\":false},\"TTSettings\":{\"EnableTT\":false,\"TTRecordSettings\":null,\"TTS3Settings\":null}}" + replication_task_settings = "{\"BeforeImageSettings\":null,\"FailTaskWhenCleanTaskResourceFailed\":false,\"ChangeProcessingDdlHandlingPolicy\":{\"HandleSourceTableAltered\":true,\"HandleSourceTableDropped\":true,\"HandleSourceTableTruncated\":true},\"ChangeProcessingTuning\":{\"BatchApplyMemoryLimit\":500,\"BatchApplyPreserveTransaction\":true,\"BatchApplyTimeoutMax\":30,\"BatchApplyTimeoutMin\":1,\"BatchSplitSize\":0,\"CommitTimeout\":1,\"MemoryKeepTime\":60,\"MemoryLimitTotal\":%[3]d,\"MinTransactionSize\":1000,\"StatementCacheSize\":50},\"CharacterSetSettings\":null,\"ControlTablesSettings\":{\"ControlSchema\":\"\",\"FullLoadExceptionTableEnabled\":false,\"HistoryTableEnabled\":false,\"HistoryTimeslotInMinutes\":5,\"StatusTableEnabled\":false,\"SuspendedTablesTableEnabled\":false},\"ErrorBehavior\":{\"ApplyErrorDeletePolicy\":\"IGNORE_RECORD\",\"ApplyErrorEscalationCount\":0,\"ApplyErrorEscalationPolicy\":\"LOG_ERROR\",\"ApplyErrorFailOnTruncationDdl\":false,\"ApplyErrorInsertPolicy\":\"LOG_ERROR\",\"ApplyErrorUpdatePolicy\":\"LOG_ERROR\",\"DataErrorEscalationCount\":0,\"DataErrorEscalationPolicy\":\"SUSPEND_TABLE\",\"DataErrorPolicy\":\"LOG_ERROR\",\"DataTruncationErrorPolicy\":\"LOG_ERROR\",\"EventErrorPolicy\":\"IGNORE\",\"FailOnNoTablesCaptured\":false,\"FailOnTransactionConsistencyBreached\":false,\"FullLoadIgnoreConflicts\":true,\"RecoverableErrorCount\":-1,\"RecoverableErrorInterval\":5,\"RecoverableErrorStopRetryAfterThrottlingMax\":false,\"RecoverableErrorThrottling\":true,\"RecoverableErrorThrottlingMax\":1800,\"TableErrorEscalationCount\":0,\"TableErrorEscalationPolicy\":\"S
TOP_TASK\",\"TableErrorPolicy\":\"SUSPEND_TABLE\"},\"FullLoadSettings\":{\"CommitRate\":10000,\"CreatePkAfterFullLoad\":false,\"MaxFullLoadSubTasks\":8,\"StopTaskCachedChangesApplied\":false,\"StopTaskCachedChangesNotApplied\":false,\"TargetTablePrepMode\":\"DROP_AND_CREATE\",\"TransactionConsistencyTimeout\":600},\"Logging\":{\"EnableLogging\":false,\"EnableLogContext\":false,\"LogComponents\":[{\"Id\":\"TRANSFORMATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_UNLOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"IO\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_LOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"PERFORMANCE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_CAPTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SORTER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"REST_SERVER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"VALIDATOR_EXT\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_APPLY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TASK_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TABLES_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"METADATA_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_FACTORY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMON\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"ADDONS\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"DATA_STRUCTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMUNICATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_TRANSFER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"}]},\"LoopbackPreventionSettings\":null,\"PostProcessingRules\":null,\"StreamBufferSettings\":{\"CtrlStreamBufferSizeInMB\":5,\"StreamBufferCount\":3,\"StreamBufferSizeInMB\":8},\"TargetMetadata\":{\"BatchApplyEnabled\":false,\"FullLobMode\":false,\"InlineLobMaxSize\":0,\"LimitedSizeLobMode\":true,\"LoadMaxFileSize\":0,\"LobChunkSize\":0,\"Lob
MaxSize\":32,\"ParallelApplyBufferSize\":0,\"ParallelApplyQueuesPerThread\":0,\"ParallelApplyThreads\":0,\"ParallelLoadBufferSize\":0,\"ParallelLoadQueuesPerThread\":0,\"ParallelLoadThreads\":0,\"SupportLobs\":true,\"TargetSchema\":\"\",\"TaskRecoveryTableEnabled\":false},\"TTSettings\":{\"EnableTT\":false,\"TTRecordSettings\":null,\"TTS3Settings\":null}}" source_endpoint_arn = aws_dms_endpoint.source.endpoint_arn table_mappings = "{\"rules\":[{\"rule-type\":\"selection\",\"rule-id\":\"1\",\"rule-name\":\"%[4]s\",\"object-locator\":{\"schema-name\":\"%%\",\"table-name\":\"%%\"},\"rule-action\":\"include\"}]}" @@ -454,7 +454,7 @@ resource "aws_dms_replication_task" "test" { migration_type = "cdc" replication_instance_arn = aws_dms_replication_instance.test.replication_instance_arn replication_task_id = %[2]q - replication_task_settings = "{\"BeforeImageSettings\":null,\"FailTaskWhenCleanTaskResourceFailed\":false,\"ChangeProcessingDdlHandlingPolicy\":{\"HandleSourceTableAltered\":true,\"HandleSourceTableDropped\":true,\"HandleSourceTableTruncated\":true},\"ChangeProcessingTuning\":{\"BatchApplyMemoryLimit\":500,\"BatchApplyPreserveTransaction\":true,\"BatchApplyTimeoutMax\":30,\"BatchApplyTimeoutMin\":1,\"BatchSplitSize\":0,\"CommitTimeout\":1,\"MemoryKeepTime\":60,\"MemoryLimitTotal\":1024,\"MinTransactionSize\":1000,\"StatementCacheSize\":50},\"CharacterSetSettings\":null,\"ControlTablesSettings\":{\"ControlSchema\":\"\",\"FullLoadExceptionTableEnabled\":false,\"HistoryTableEnabled\":false,\"HistoryTimeslotInMinutes\":5,\"StatusTableEnabled\":false,\"SuspendedTablesTableEnabled\":false},\"ErrorBehavior\":{\"ApplyErrorDeletePolicy\":\"IGNORE_RECORD\",\"ApplyErrorEscalationCount\":0,\"ApplyErrorEscalationPolicy\":\"LOG_ERROR\",\"ApplyErrorFailOnTruncationDdl\":false,\"ApplyErrorInsertPolicy\":\"LOG_ERROR\",\"ApplyErrorUpdatePolicy\":\"LOG_ERROR\",\"DataErrorEscalationCount\":0,\"DataErrorEscalationPolicy\":\"SUSPEND_TABLE\",\"DataErrorPolicy\":\"LOG_ERROR\",\"DataTru
ncationErrorPolicy\":\"LOG_ERROR\",\"EventErrorPolicy\":\"IGNORE\",\"FailOnNoTablesCaptured\":false,\"FailOnTransactionConsistencyBreached\":false,\"FullLoadIgnoreConflicts\":true,\"RecoverableErrorCount\":-1,\"RecoverableErrorInterval\":5,\"RecoverableErrorStopRetryAfterThrottlingMax\":false,\"RecoverableErrorThrottling\":true,\"RecoverableErrorThrottlingMax\":1800,\"TableErrorEscalationCount\":0,\"TableErrorEscalationPolicy\":\"STOP_TASK\",\"TableErrorPolicy\":\"SUSPEND_TABLE\"},\"FullLoadSettings\":{\"CommitRate\":10000,\"CreatePkAfterFullLoad\":false,\"MaxFullLoadSubTasks\":8,\"StopTaskCachedChangesApplied\":false,\"StopTaskCachedChangesNotApplied\":false,\"TargetTablePrepMode\":\"DROP_AND_CREATE\",\"TransactionConsistencyTimeout\":600},\"Logging\":{\"EnableLogging\":false,\"LogComponents\":[{\"Id\":\"TRANSFORMATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_UNLOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"IO\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_LOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"PERFORMANCE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_CAPTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SORTER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"REST_SERVER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"VALIDATOR_EXT\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_APPLY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TASK_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TABLES_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"METADATA_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_FACTORY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMON\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"ADDONS\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"DATA_STRUCTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMUNICATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"
Id\":\"FILE_TRANSFER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"}]},\"LoopbackPreventionSettings\":null,\"PostProcessingRules\":null,\"StreamBufferSettings\":{\"CtrlStreamBufferSizeInMB\":5,\"StreamBufferCount\":3,\"StreamBufferSizeInMB\":8},\"TargetMetadata\":{\"BatchApplyEnabled\":false,\"FullLobMode\":false,\"InlineLobMaxSize\":0,\"LimitedSizeLobMode\":true,\"LoadMaxFileSize\":0,\"LobChunkSize\":0,\"LobMaxSize\":32,\"ParallelApplyBufferSize\":0,\"ParallelApplyQueuesPerThread\":0,\"ParallelApplyThreads\":0,\"ParallelLoadBufferSize\":0,\"ParallelLoadQueuesPerThread\":0,\"ParallelLoadThreads\":0,\"SupportLobs\":true,\"TargetSchema\":\"\",\"TaskRecoveryTableEnabled\":false},\"TTSettings\":{\"EnableTT\":false,\"TTRecordSettings\":null,\"TTS3Settings\":null}}" + replication_task_settings = "{\"BeforeImageSettings\":null,\"FailTaskWhenCleanTaskResourceFailed\":false,\"ChangeProcessingDdlHandlingPolicy\":{\"HandleSourceTableAltered\":true,\"HandleSourceTableDropped\":true,\"HandleSourceTableTruncated\":true},\"ChangeProcessingTuning\":{\"BatchApplyMemoryLimit\":500,\"BatchApplyPreserveTransaction\":true,\"BatchApplyTimeoutMax\":30,\"BatchApplyTimeoutMin\":1,\"BatchSplitSize\":0,\"CommitTimeout\":1,\"MemoryKeepTime\":60,\"MemoryLimitTotal\":1024,\"MinTransactionSize\":1000,\"StatementCacheSize\":50},\"CharacterSetSettings\":null,\"ControlTablesSettings\":{\"ControlSchema\":\"\",\"FullLoadExceptionTableEnabled\":false,\"HistoryTableEnabled\":false,\"HistoryTimeslotInMinutes\":5,\"StatusTableEnabled\":false,\"SuspendedTablesTableEnabled\":false},\"ErrorBehavior\":{\"ApplyErrorDeletePolicy\":\"IGNORE_RECORD\",\"ApplyErrorEscalationCount\":0,\"ApplyErrorEscalationPolicy\":\"LOG_ERROR\",\"ApplyErrorFailOnTruncationDdl\":false,\"ApplyErrorInsertPolicy\":\"LOG_ERROR\",\"ApplyErrorUpdatePolicy\":\"LOG_ERROR\",\"DataErrorEscalationCount\":0,\"DataErrorEscalationPolicy\":\"SUSPEND_TABLE\",\"DataErrorPolicy\":\"LOG_ERROR\",\"DataTruncationErrorPolicy\":\"LOG_ERROR\",\"EventErrorPolic
y\":\"IGNORE\",\"FailOnNoTablesCaptured\":false,\"FailOnTransactionConsistencyBreached\":false,\"FullLoadIgnoreConflicts\":true,\"RecoverableErrorCount\":-1,\"RecoverableErrorInterval\":5,\"RecoverableErrorStopRetryAfterThrottlingMax\":false,\"RecoverableErrorThrottling\":true,\"RecoverableErrorThrottlingMax\":1800,\"TableErrorEscalationCount\":0,\"TableErrorEscalationPolicy\":\"STOP_TASK\",\"TableErrorPolicy\":\"SUSPEND_TABLE\"},\"FullLoadSettings\":{\"CommitRate\":10000,\"CreatePkAfterFullLoad\":false,\"MaxFullLoadSubTasks\":8,\"StopTaskCachedChangesApplied\":false,\"StopTaskCachedChangesNotApplied\":false,\"TargetTablePrepMode\":\"DROP_AND_CREATE\",\"TransactionConsistencyTimeout\":600},\"Logging\":{\"EnableLogging\":false,\"EnableLogContext\":false,\"LogComponents\":[{\"Id\":\"TRANSFORMATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_UNLOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"IO\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_LOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"PERFORMANCE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_CAPTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SORTER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"REST_SERVER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"VALIDATOR_EXT\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_APPLY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TASK_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TABLES_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"METADATA_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_FACTORY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMON\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"ADDONS\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"DATA_STRUCTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMUNICATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_TRANSFER\",\"
Severity\":\"LOGGER_SEVERITY_DEFAULT\"}]},\"LoopbackPreventionSettings\":null,\"PostProcessingRules\":null,\"StreamBufferSettings\":{\"CtrlStreamBufferSizeInMB\":5,\"StreamBufferCount\":3,\"StreamBufferSizeInMB\":8},\"TargetMetadata\":{\"BatchApplyEnabled\":false,\"FullLobMode\":false,\"InlineLobMaxSize\":0,\"LimitedSizeLobMode\":true,\"LoadMaxFileSize\":0,\"LobChunkSize\":0,\"LobMaxSize\":32,\"ParallelApplyBufferSize\":0,\"ParallelApplyQueuesPerThread\":0,\"ParallelApplyThreads\":0,\"ParallelLoadBufferSize\":0,\"ParallelLoadQueuesPerThread\":0,\"ParallelLoadThreads\":0,\"SupportLobs\":true,\"TargetSchema\":\"\",\"TaskRecoveryTableEnabled\":false},\"TTSettings\":{\"EnableTT\":false,\"TTRecordSettings\":null,\"TTS3Settings\":null}}" source_endpoint_arn = aws_dms_endpoint.source.endpoint_arn table_mappings = "{\"rules\":[{\"rule-type\":\"selection\",\"rule-id\":\"1\",\"rule-name\":\"1\",\"object-locator\":{\"schema-name\":\"%%\",\"table-name\":\"%%\"},\"rule-action\":\"include\"}]}" target_endpoint_arn = aws_dms_endpoint.target.endpoint_arn @@ -552,7 +552,7 @@ resource "aws_rds_cluster_parameter_group" "test" { resource "aws_rds_cluster" "test1" { cluster_identifier = "%[1]s-aurora-cluster-source" engine = "aurora-mysql" - engine_version = "5.7.mysql_aurora.2.11.0" + engine_version = "5.7.mysql_aurora.2.11.2" database_name = "tftest" master_username = "tftest" master_password = "mustbeeightcharaters" @@ -574,7 +574,7 @@ resource "aws_rds_cluster_instance" "test1" { resource "aws_rds_cluster" "test2" { cluster_identifier = "%[1]s-aurora-cluster-target" engine = "aurora-mysql" - engine_version = "5.7.mysql_aurora.2.11.0" + engine_version = "5.7.mysql_aurora.2.11.2" database_name = "tftest" master_username = "tftest" master_password = "mustbeeightcharaters" @@ -635,7 +635,7 @@ resource "aws_dms_replication_task" "test" { migration_type = "full-load-and-cdc" replication_instance_arn = aws_dms_replication_instance.test.replication_instance_arn replication_task_id = %[1]q 
- replication_task_settings = "{\"BeforeImageSettings\":null,\"FailTaskWhenCleanTaskResourceFailed\":false,\"ChangeProcessingDdlHandlingPolicy\":{\"HandleSourceTableAltered\":true,\"HandleSourceTableDropped\":true,\"HandleSourceTableTruncated\":true},\"ChangeProcessingTuning\":{\"BatchApplyMemoryLimit\":500,\"BatchApplyPreserveTransaction\":true,\"BatchApplyTimeoutMax\":30,\"BatchApplyTimeoutMin\":1,\"BatchSplitSize\":0,\"CommitTimeout\":1,\"MemoryKeepTime\":60,\"MemoryLimitTotal\":1024,\"MinTransactionSize\":1000,\"StatementCacheSize\":50},\"CharacterSetSettings\":null,\"ControlTablesSettings\":{\"ControlSchema\":\"\",\"FullLoadExceptionTableEnabled\":false,\"HistoryTableEnabled\":false,\"HistoryTimeslotInMinutes\":5,\"StatusTableEnabled\":false,\"SuspendedTablesTableEnabled\":false},\"ErrorBehavior\":{\"ApplyErrorDeletePolicy\":\"IGNORE_RECORD\",\"ApplyErrorEscalationCount\":0,\"ApplyErrorEscalationPolicy\":\"LOG_ERROR\",\"ApplyErrorFailOnTruncationDdl\":false,\"ApplyErrorInsertPolicy\":\"LOG_ERROR\",\"ApplyErrorUpdatePolicy\":\"LOG_ERROR\",\"DataErrorEscalationCount\":0,\"DataErrorEscalationPolicy\":\"SUSPEND_TABLE\",\"DataErrorPolicy\":\"LOG_ERROR\",\"DataTruncationErrorPolicy\":\"LOG_ERROR\",\"EventErrorPolicy\":\"IGNORE\",\"FailOnNoTablesCaptured\":false,\"FailOnTransactionConsistencyBreached\":false,\"FullLoadIgnoreConflicts\":true,\"RecoverableErrorCount\":-1,\"RecoverableErrorInterval\":5,\"RecoverableErrorStopRetryAfterThrottlingMax\":false,\"RecoverableErrorThrottling\":true,\"RecoverableErrorThrottlingMax\":1800,\"TableErrorEscalationCount\":0,\"TableErrorEscalationPolicy\":\"STOP_TASK\",\"TableErrorPolicy\":\"SUSPEND_TABLE\"},\"FullLoadSettings\":{\"CommitRate\":10000,\"CreatePkAfterFullLoad\":false,\"MaxFullLoadSubTasks\":8,\"StopTaskCachedChangesApplied\":false,\"StopTaskCachedChangesNotApplied\":false,\"TargetTablePrepMode\":\"DROP_AND_CREATE\",\"TransactionConsistencyTimeout\":600},\"Logging\":{\"EnableLogging\":false,\"LogComponents\":[{\"Id\":\"TR
ANSFORMATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_UNLOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"IO\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_LOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"PERFORMANCE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_CAPTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SORTER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"REST_SERVER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"VALIDATOR_EXT\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_APPLY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TASK_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TABLES_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"METADATA_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_FACTORY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMON\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"ADDONS\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"DATA_STRUCTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMUNICATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_TRANSFER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"}]},\"LoopbackPreventionSettings\":null,\"PostProcessingRules\":null,\"StreamBufferSettings\":{\"CtrlStreamBufferSizeInMB\":5,\"StreamBufferCount\":3,\"StreamBufferSizeInMB\":8},\"TargetMetadata\":{\"BatchApplyEnabled\":false,\"FullLobMode\":false,\"InlineLobMaxSize\":0,\"LimitedSizeLobMode\":true,\"LoadMaxFileSize\":0,\"LobChunkSize\":0,\"LobMaxSize\":32,\"ParallelApplyBufferSize\":0,\"ParallelApplyQueuesPerThread\":0,\"ParallelApplyThreads\":0,\"ParallelLoadBufferSize\":0,\"ParallelLoadQueuesPerThread\":0,\"ParallelLoadThreads\":0,\"SupportLobs\":true,\"TargetSchema\":\"\",\"TaskRecoveryTableEnabled\":false},\"TTSettings\":{\"EnableTT\":false,\"TTRecordSettings\":null,\"TTS3Settings\":null}}" + replication_task_settings = 
"{\"BeforeImageSettings\":null,\"FailTaskWhenCleanTaskResourceFailed\":false,\"ChangeProcessingDdlHandlingPolicy\":{\"HandleSourceTableAltered\":true,\"HandleSourceTableDropped\":true,\"HandleSourceTableTruncated\":true},\"ChangeProcessingTuning\":{\"BatchApplyMemoryLimit\":500,\"BatchApplyPreserveTransaction\":true,\"BatchApplyTimeoutMax\":30,\"BatchApplyTimeoutMin\":1,\"BatchSplitSize\":0,\"CommitTimeout\":1,\"MemoryKeepTime\":60,\"MemoryLimitTotal\":1024,\"MinTransactionSize\":1000,\"StatementCacheSize\":50},\"CharacterSetSettings\":null,\"ControlTablesSettings\":{\"ControlSchema\":\"\",\"FullLoadExceptionTableEnabled\":false,\"HistoryTableEnabled\":false,\"HistoryTimeslotInMinutes\":5,\"StatusTableEnabled\":false,\"SuspendedTablesTableEnabled\":false},\"ErrorBehavior\":{\"ApplyErrorDeletePolicy\":\"IGNORE_RECORD\",\"ApplyErrorEscalationCount\":0,\"ApplyErrorEscalationPolicy\":\"LOG_ERROR\",\"ApplyErrorFailOnTruncationDdl\":false,\"ApplyErrorInsertPolicy\":\"LOG_ERROR\",\"ApplyErrorUpdatePolicy\":\"LOG_ERROR\",\"DataErrorEscalationCount\":0,\"DataErrorEscalationPolicy\":\"SUSPEND_TABLE\",\"DataErrorPolicy\":\"LOG_ERROR\",\"DataTruncationErrorPolicy\":\"LOG_ERROR\",\"EventErrorPolicy\":\"IGNORE\",\"FailOnNoTablesCaptured\":false,\"FailOnTransactionConsistencyBreached\":false,\"FullLoadIgnoreConflicts\":true,\"RecoverableErrorCount\":-1,\"RecoverableErrorInterval\":5,\"RecoverableErrorStopRetryAfterThrottlingMax\":false,\"RecoverableErrorThrottling\":true,\"RecoverableErrorThrottlingMax\":1800,\"TableErrorEscalationCount\":0,\"TableErrorEscalationPolicy\":\"STOP_TASK\",\"TableErrorPolicy\":\"SUSPEND_TABLE\"},\"FullLoadSettings\":{\"CommitRate\":10000,\"CreatePkAfterFullLoad\":false,\"MaxFullLoadSubTasks\":8,\"StopTaskCachedChangesApplied\":false,\"StopTaskCachedChangesNotApplied\":false,\"TargetTablePrepMode\":\"DROP_AND_CREATE\",\"TransactionConsistencyTimeout\":600},\"Logging\":{\"EnableLogging\":false,\"EnableLogContext\":false,\"LogComponents\":[{\"Id\":\"TRANS
FORMATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_UNLOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"IO\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_LOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"PERFORMANCE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_CAPTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SORTER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"REST_SERVER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"VALIDATOR_EXT\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_APPLY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TASK_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TABLES_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"METADATA_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_FACTORY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMON\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"ADDONS\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"DATA_STRUCTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMUNICATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_TRANSFER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"}]},\"LoopbackPreventionSettings\":null,\"PostProcessingRules\":null,\"StreamBufferSettings\":{\"CtrlStreamBufferSizeInMB\":5,\"StreamBufferCount\":3,\"StreamBufferSizeInMB\":8},\"TargetMetadata\":{\"BatchApplyEnabled\":false,\"FullLobMode\":false,\"InlineLobMaxSize\":0,\"LimitedSizeLobMode\":true,\"LoadMaxFileSize\":0,\"LobChunkSize\":0,\"LobMaxSize\":32,\"ParallelApplyBufferSize\":0,\"ParallelApplyQueuesPerThread\":0,\"ParallelApplyThreads\":0,\"ParallelLoadBufferSize\":0,\"ParallelLoadQueuesPerThread\":0,\"ParallelLoadThreads\":0,\"SupportLobs\":true,\"TargetSchema\":\"\",\"TaskRecoveryTableEnabled\":false},\"TTSettings\":{\"EnableTT\":false,\"TTRecordSettings\":null,\"TTS3Settings\":null}}" source_endpoint_arn = 
aws_dms_endpoint.source.endpoint_arn table_mappings = "{\"rules\":[{\"rule-type\":\"selection\",\"rule-id\":\"1\",\"rule-name\":\"%[3]s\",\"object-locator\":{\"schema-name\":\"%%\",\"table-name\":\"%%\"},\"rule-action\":\"include\"}]}" @@ -820,7 +820,7 @@ resource "aws_dms_replication_task" "test" { migration_type = "full-load-and-cdc" replication_instance_arn = aws_dms_replication_instance.test.replication_instance_arn replication_task_id = %[1]q - replication_task_settings = "{\"BeforeImageSettings\":null,\"FailTaskWhenCleanTaskResourceFailed\":false,\"ChangeProcessingDdlHandlingPolicy\":{\"HandleSourceTableAltered\":true,\"HandleSourceTableDropped\":true,\"HandleSourceTableTruncated\":true},\"ChangeProcessingTuning\":{\"BatchApplyMemoryLimit\":500,\"BatchApplyPreserveTransaction\":true,\"BatchApplyTimeoutMax\":30,\"BatchApplyTimeoutMin\":1,\"BatchSplitSize\":0,\"CommitTimeout\":1,\"MemoryKeepTime\":60,\"MemoryLimitTotal\":1024,\"MinTransactionSize\":1000,\"StatementCacheSize\":50},\"CharacterSetSettings\":null,\"ControlTablesSettings\":{\"ControlSchema\":\"\",\"FullLoadExceptionTableEnabled\":false,\"HistoryTableEnabled\":false,\"HistoryTimeslotInMinutes\":5,\"StatusTableEnabled\":false,\"SuspendedTablesTableEnabled\":false},\"ErrorBehavior\":{\"ApplyErrorDeletePolicy\":\"IGNORE_RECORD\",\"ApplyErrorEscalationCount\":0,\"ApplyErrorEscalationPolicy\":\"LOG_ERROR\",\"ApplyErrorFailOnTruncationDdl\":false,\"ApplyErrorInsertPolicy\":\"LOG_ERROR\",\"ApplyErrorUpdatePolicy\":\"LOG_ERROR\",\"DataErrorEscalationCount\":0,\"DataErrorEscalationPolicy\":\"SUSPEND_TABLE\",\"DataErrorPolicy\":\"LOG_ERROR\",\"DataTruncationErrorPolicy\":\"LOG_ERROR\",\"EventErrorPolicy\":\"IGNORE\",\"FailOnNoTablesCaptured\":false,\"FailOnTransactionConsistencyBreached\":false,\"FullLoadIgnoreConflicts\":true,\"RecoverableErrorCount\":-1,\"RecoverableErrorInterval\":5,\"RecoverableErrorStopRetryAfterThrottlingMax\":false,\"RecoverableErrorThrottling\":true,\"RecoverableErrorThrottlingMax\":180
0,\"TableErrorEscalationCount\":0,\"TableErrorEscalationPolicy\":\"STOP_TASK\",\"TableErrorPolicy\":\"SUSPEND_TABLE\"},\"FullLoadSettings\":{\"CommitRate\":10000,\"CreatePkAfterFullLoad\":false,\"MaxFullLoadSubTasks\":8,\"StopTaskCachedChangesApplied\":false,\"StopTaskCachedChangesNotApplied\":false,\"TargetTablePrepMode\":\"DROP_AND_CREATE\",\"TransactionConsistencyTimeout\":600},\"Logging\":{\"EnableLogging\":false,\"LogComponents\":[{\"Id\":\"TRANSFORMATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_UNLOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"IO\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_LOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"PERFORMANCE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_CAPTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SORTER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"REST_SERVER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"VALIDATOR_EXT\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_APPLY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TASK_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TABLES_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"METADATA_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_FACTORY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMON\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"ADDONS\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"DATA_STRUCTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMUNICATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_TRANSFER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"}]},\"LoopbackPreventionSettings\":null,\"PostProcessingRules\":null,\"StreamBufferSettings\":{\"CtrlStreamBufferSizeInMB\":5,\"StreamBufferCount\":3,\"StreamBufferSizeInMB\":8},\"TargetMetadata\":{\"BatchApplyEnabled\":false,\"FullLobMode\":false,\"InlineLobMaxSize\":0,\"LimitedSizeLobMode\":true,\"Loa
dMaxFileSize\":0,\"LobChunkSize\":0,\"LobMaxSize\":32,\"ParallelApplyBufferSize\":0,\"ParallelApplyQueuesPerThread\":0,\"ParallelApplyThreads\":0,\"ParallelLoadBufferSize\":0,\"ParallelLoadQueuesPerThread\":0,\"ParallelLoadThreads\":0,\"SupportLobs\":true,\"TargetSchema\":\"\",\"TaskRecoveryTableEnabled\":false},\"TTSettings\":{\"EnableTT\":false,\"TTRecordSettings\":null,\"TTS3Settings\":null}}" + replication_task_settings = "{\"BeforeImageSettings\":null,\"FailTaskWhenCleanTaskResourceFailed\":false,\"ChangeProcessingDdlHandlingPolicy\":{\"HandleSourceTableAltered\":true,\"HandleSourceTableDropped\":true,\"HandleSourceTableTruncated\":true},\"ChangeProcessingTuning\":{\"BatchApplyMemoryLimit\":500,\"BatchApplyPreserveTransaction\":true,\"BatchApplyTimeoutMax\":30,\"BatchApplyTimeoutMin\":1,\"BatchSplitSize\":0,\"CommitTimeout\":1,\"MemoryKeepTime\":60,\"MemoryLimitTotal\":1024,\"MinTransactionSize\":1000,\"StatementCacheSize\":50},\"CharacterSetSettings\":null,\"ControlTablesSettings\":{\"ControlSchema\":\"\",\"FullLoadExceptionTableEnabled\":false,\"HistoryTableEnabled\":false,\"HistoryTimeslotInMinutes\":5,\"StatusTableEnabled\":false,\"SuspendedTablesTableEnabled\":false},\"ErrorBehavior\":{\"ApplyErrorDeletePolicy\":\"IGNORE_RECORD\",\"ApplyErrorEscalationCount\":0,\"ApplyErrorEscalationPolicy\":\"LOG_ERROR\",\"ApplyErrorFailOnTruncationDdl\":false,\"ApplyErrorInsertPolicy\":\"LOG_ERROR\",\"ApplyErrorUpdatePolicy\":\"LOG_ERROR\",\"DataErrorEscalationCount\":0,\"DataErrorEscalationPolicy\":\"SUSPEND_TABLE\",\"DataErrorPolicy\":\"LOG_ERROR\",\"DataTruncationErrorPolicy\":\"LOG_ERROR\",\"EventErrorPolicy\":\"IGNORE\",\"FailOnNoTablesCaptured\":false,\"FailOnTransactionConsistencyBreached\":false,\"FullLoadIgnoreConflicts\":true,\"RecoverableErrorCount\":-1,\"RecoverableErrorInterval\":5,\"RecoverableErrorStopRetryAfterThrottlingMax\":false,\"RecoverableErrorThrottling\":true,\"RecoverableErrorThrottlingMax\":1800,\"TableErrorEscalationCount\":0,\"TableErrorEscala
tionPolicy\":\"STOP_TASK\",\"TableErrorPolicy\":\"SUSPEND_TABLE\"},\"FullLoadSettings\":{\"CommitRate\":10000,\"CreatePkAfterFullLoad\":false,\"MaxFullLoadSubTasks\":8,\"StopTaskCachedChangesApplied\":false,\"StopTaskCachedChangesNotApplied\":false,\"TargetTablePrepMode\":\"DROP_AND_CREATE\",\"TransactionConsistencyTimeout\":600},\"Logging\":{\"EnableLogging\":false,\"EnableLogContext\":false,\"LogComponents\":[{\"Id\":\"TRANSFORMATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_UNLOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"IO\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_LOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"PERFORMANCE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_CAPTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SORTER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"REST_SERVER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"VALIDATOR_EXT\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_APPLY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TASK_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TABLES_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"METADATA_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_FACTORY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMON\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"ADDONS\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"DATA_STRUCTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMUNICATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_TRANSFER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"}]},\"LoopbackPreventionSettings\":null,\"PostProcessingRules\":null,\"StreamBufferSettings\":{\"CtrlStreamBufferSizeInMB\":5,\"StreamBufferCount\":3,\"StreamBufferSizeInMB\":8},\"TargetMetadata\":{\"BatchApplyEnabled\":false,\"FullLobMode\":false,\"InlineLobMaxSize\":0,\"LimitedSizeLobMode\":true,\"LoadMaxFileSize\":0,\"LobChu
nkSize\":0,\"LobMaxSize\":32,\"ParallelApplyBufferSize\":0,\"ParallelApplyQueuesPerThread\":0,\"ParallelApplyThreads\":0,\"ParallelLoadBufferSize\":0,\"ParallelLoadQueuesPerThread\":0,\"ParallelLoadThreads\":0,\"SupportLobs\":true,\"TargetSchema\":\"\",\"TaskRecoveryTableEnabled\":false},\"TTSettings\":{\"EnableTT\":false,\"TTRecordSettings\":null,\"TTS3Settings\":null}}" source_endpoint_arn = aws_dms_s3_endpoint.source.endpoint_arn table_mappings = "{\"rules\":[{\"rule-type\":\"selection\",\"rule-id\":\"1\",\"rule-name\":\"1\",\"object-locator\":{\"schema-name\":\"%%\",\"table-name\":\"%%\"},\"rule-action\":\"include\"}]}" From ef79b678aa329134ecc3139f53c69ec9c8b14721 Mon Sep 17 00:00:00 2001 From: Jakob Ondrey Date: Thu, 13 Apr 2023 22:24:22 -0500 Subject: [PATCH 006/438] adds changelog --- .changelog/30721.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/30721.txt diff --git a/.changelog/30721.txt b/.changelog/30721.txt new file mode 100644 index 00000000000..464706991f9 --- /dev/null +++ b/.changelog/30721.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_dms_replication_task: Allow in-place migration between DMS instances +``` \ No newline at end of file From fb8b829e58c2ad24683f83e4bb69dab0c450a87c Mon Sep 17 00:00:00 2001 From: Jakob Date: Fri, 14 Apr 2023 11:01:58 -0500 Subject: [PATCH 007/438] add acc test --- internal/service/dms/replication_task_test.go | 140 ++++++++++++++++++ 1 file changed, 140 insertions(+) diff --git a/internal/service/dms/replication_task_test.go b/internal/service/dms/replication_task_test.go index 37eb60d2782..dca2e1ccc5d 100644 --- a/internal/service/dms/replication_task_test.go +++ b/internal/service/dms/replication_task_test.go @@ -151,6 +151,39 @@ func TestAccDMSReplicationTask_update(t *testing.T) { }) } +func TestAccDMSReplicationTask_move(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := 
"aws_dms_replication_task.test" + instanceOne := "aws_dms_replication_instance.test" + instanceTwo := "aws_dms_replication_instance.test2" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, dms.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckReplicationTaskDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccReplicationTaskConfig_move(rName, "aws_dms_replication_instance.test.replication_instance_arn"), + Check: resource.ComposeTestCheckFunc( + testAccCheckReplicationInstanceExists(ctx, resourceName), + resource.TestCheckResourceAttrSet(resourceName, "replication_task_arn"), + resource.TestCheckResourceAttrPair(resourceName, "replication_instance_arn", instanceOne, "replication_instance_arn"), + ), + }, + { + Config: testAccReplicationTaskConfig_move(rName, "aws_dms_replication_instance.test2.replication_instance_arn"), + Check: resource.ComposeTestCheckFunc( + testAccCheckReplicationInstanceExists(ctx, resourceName), + resource.TestCheckResourceAttrSet(resourceName, "replication_task_arn"), + resource.TestCheckResourceAttrPair(resourceName, "replication_instance_arn", instanceTwo, "replication_instance_arn"), + ), + }, + }, + }) +} + func TestAccDMSReplicationTask_cdcStartPosition(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -402,6 +435,91 @@ resource "aws_dms_replication_instance" "test" { `, rName)) } +func replicationTaskConfigMove(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigAvailableAZsNoOptIn(), + fmt.Sprintf(` +data "aws_partition" "current" {} + +data "aws_region" "current" {} + +resource "aws_vpc" "test" { + cidr_block = "10.1.0.0/16" + + tags = { + Name = %[1]q + } +} + +resource "aws_subnet" "test1" { + cidr_block = "10.1.1.0/24" + availability_zone = data.aws_availability_zones.available.names[0] + vpc_id = 
aws_vpc.test.id + + tags = { + Name = %[1]q + } +} + +resource "aws_subnet" "test2" { + cidr_block = "10.1.2.0/24" + availability_zone = data.aws_availability_zones.available.names[1] + vpc_id = aws_vpc.test.id + + tags = { + Name = "%[1]s-2" + } +} + +resource "aws_dms_endpoint" "source" { + database_name = %[1]q + endpoint_id = "%[1]s-source" + endpoint_type = "source" + engine_name = "aurora" + server_name = "tf-test-cluster.cluster-xxxxxxx.${data.aws_region.current.name}.rds.${data.aws_partition.current.dns_suffix}" + port = 3306 + username = "tftest" + password = "tftest" +} + +resource "aws_dms_endpoint" "target" { + database_name = %[1]q + endpoint_id = "%[1]s-target" + endpoint_type = "target" + engine_name = "aurora" + server_name = "tf-test-cluster.cluster-xxxxxxx.${data.aws_region.current.name}.rds.${data.aws_partition.current.dns_suffix}" + port = 3306 + username = "tftest" + password = "tftest" +} + +resource "aws_dms_replication_subnet_group" "test" { + replication_subnet_group_id = %[1]q + replication_subnet_group_description = "terraform test for replication subnet group" + subnet_ids = [aws_subnet.test1.id, aws_subnet.test2.id] +} + +resource "aws_dms_replication_instance" "test" { + allocated_storage = 5 + auto_minor_version_upgrade = true + replication_instance_class = "dms.c4.large" + replication_instance_id = %[1]q + preferred_maintenance_window = "sun:00:30-sun:02:30" + publicly_accessible = false + replication_subnet_group_id = aws_dms_replication_subnet_group.test.replication_subnet_group_id +} +resource "aws_dms_replication_instance" "test2" { + allocated_storage = 5 + auto_minor_version_upgrade = true + replication_instance_class = "dms.c4.large" + replication_instance_id = "%[1]s-2" + preferred_maintenance_window = "sun:00:30-sun:02:30" + publicly_accessible = false + replication_subnet_group_id = aws_dms_replication_subnet_group.test.replication_subnet_group_id +} +`, rName)) +} + func testAccReplicationTaskConfig_basic(rName, tags 
string) string { return acctest.ConfigCompose( replicationTaskConfigBase(rName), @@ -445,6 +563,28 @@ resource "aws_dms_replication_task" "test" { `, rName, migType, memLimitTotal, ruleName)) } +func testAccReplicationTaskConfig_move(rName, rInstanceArn string) string { + + return acctest.ConfigCompose( + replicationTaskConfigMove(rName), + fmt.Sprintf(` +resource "aws_dms_replication_task" "test" { + migration_type = "full-load" + replication_instance_arn = %[2]s + replication_task_id = %[1]q + replication_task_settings = "{\"BeforeImageSettings\":null,\"FailTaskWhenCleanTaskResourceFailed\":false,\"ChangeProcessingDdlHandlingPolicy\":{\"HandleSourceTableAltered\":true,\"HandleSourceTableDropped\":true,\"HandleSourceTableTruncated\":true},\"ChangeProcessingTuning\":{\"BatchApplyMemoryLimit\":500,\"BatchApplyPreserveTransaction\":true,\"BatchApplyTimeoutMax\":30,\"BatchApplyTimeoutMin\":1,\"BatchSplitSize\":0,\"CommitTimeout\":1,\"MemoryKeepTime\":60,\"MemoryLimitTotal\":1024,\"MinTransactionSize\":1000,\"StatementCacheSize\":50},\"CharacterSetSettings\":null,\"ControlTablesSettings\":{\"ControlSchema\":\"\",\"FullLoadExceptionTableEnabled\":false,\"HistoryTableEnabled\":false,\"HistoryTimeslotInMinutes\":5,\"StatusTableEnabled\":false,\"SuspendedTablesTableEnabled\":false},\"ErrorBehavior\":{\"ApplyErrorDeletePolicy\":\"IGNORE_RECORD\",\"ApplyErrorEscalationCount\":0,\"ApplyErrorEscalationPolicy\":\"LOG_ERROR\",\"ApplyErrorFailOnTruncationDdl\":false,\"ApplyErrorInsertPolicy\":\"LOG_ERROR\",\"ApplyErrorUpdatePolicy\":\"LOG_ERROR\",\"DataErrorEscalationCount\":0,\"DataErrorEscalationPolicy\":\"SUSPEND_TABLE\",\"DataErrorPolicy\":\"LOG_ERROR\",\"DataTruncationErrorPolicy\":\"LOG_ERROR\",\"EventErrorPolicy\":\"IGNORE\",\"FailOnNoTablesCaptured\":false,\"FailOnTransactionConsistencyBreached\":false,\"FullLoadIgnoreConflicts\":true,\"RecoverableErrorCount\":-1,\"RecoverableErrorInterval\":5,\"RecoverableErrorStopRetryAfterThrottlingMax\":false,\"RecoverableErrorThrottli
ng\":true,\"RecoverableErrorThrottlingMax\":1800,\"TableErrorEscalationCount\":0,\"TableErrorEscalationPolicy\":\"STOP_TASK\",\"TableErrorPolicy\":\"SUSPEND_TABLE\"},\"FullLoadSettings\":{\"CommitRate\":10000,\"CreatePkAfterFullLoad\":false,\"MaxFullLoadSubTasks\":8,\"StopTaskCachedChangesApplied\":false,\"StopTaskCachedChangesNotApplied\":false,\"TargetTablePrepMode\":\"DROP_AND_CREATE\",\"TransactionConsistencyTimeout\":600},\"Logging\":{\"EnableLogging\":false,\"EnableLogContext\":false,\"LogComponents\":[{\"Id\":\"TRANSFORMATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_UNLOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"IO\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_LOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"PERFORMANCE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_CAPTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SORTER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"REST_SERVER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"VALIDATOR_EXT\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_APPLY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TASK_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TABLES_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"METADATA_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_FACTORY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMON\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"ADDONS\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"DATA_STRUCTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMUNICATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_TRANSFER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"}]},\"LoopbackPreventionSettings\":null,\"PostProcessingRules\":null,\"StreamBufferSettings\":{\"CtrlStreamBufferSizeInMB\":5,\"StreamBufferCount\":3,\"StreamBufferSizeInMB\":8},\"TargetMetadata\":{\"BatchApplyEnabled\":false,\"Fu
llLobMode\":false,\"InlineLobMaxSize\":0,\"LimitedSizeLobMode\":true,\"LoadMaxFileSize\":0,\"LobChunkSize\":0,\"LobMaxSize\":32,\"ParallelApplyBufferSize\":0,\"ParallelApplyQueuesPerThread\":0,\"ParallelApplyThreads\":0,\"ParallelLoadBufferSize\":0,\"ParallelLoadQueuesPerThread\":0,\"ParallelLoadThreads\":0,\"SupportLobs\":true,\"TargetSchema\":\"\",\"TaskRecoveryTableEnabled\":false},\"TTSettings\":{\"EnableTT\":false,\"TTRecordSettings\":null,\"TTS3Settings\":null}}" + source_endpoint_arn = aws_dms_endpoint.source.endpoint_arn + table_mappings = "{\"rules\":[{\"rule-type\":\"selection\",\"rule-id\":\"1\",\"rule-name\":\"1\",\"object-locator\":{\"schema-name\":\"%%\",\"table-name\":\"%%\"},\"rule-action\":\"include\"}]}" + + tags = { + Name = %[1]q + } + + target_endpoint_arn = aws_dms_endpoint.target.endpoint_arn +} +`, rName, rInstanceArn)) +} + func testAccReplicationTaskConfig_cdcStartPosition(rName, cdcStartPosition string) string { return acctest.ConfigCompose( replicationTaskConfigBase(rName), From 34f450c6f2d8bffc04455cec506289513332b2d1 Mon Sep 17 00:00:00 2001 From: Jakob Date: Thu, 20 Apr 2023 17:21:49 -0500 Subject: [PATCH 008/438] increases some timeouts --- internal/service/dms/wait.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/dms/wait.go b/internal/service/dms/wait.go index c0736382b37..1b872f1750e 100644 --- a/internal/service/dms/wait.go +++ b/internal/service/dms/wait.go @@ -10,8 +10,8 @@ import ( const ( propagationTimeout = 2 * time.Minute - replicationTaskRunningTimeout = 5 * time.Minute - moveTaskTimeout = 10 * time.Minute + replicationTaskRunningTimeout = 7 * time.Minute + moveTaskTimeout = 15 * time.Minute ) func waitEndpointDeleted(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) error { From 6063d3e81dfcb62685d1e47d361124c89b46431b Mon Sep 17 00:00:00 2001 From: Albin Gustavsson Date: Fri, 5 May 2023 14:47:09 +0200 Subject: [PATCH 009/438] 
r/aws_kms_key: Add xks_key_id argument --- internal/service/kms/key.go | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/internal/service/kms/key.go b/internal/service/kms/key.go index c026122b66f..e7659f7f4bb 100644 --- a/internal/service/kms/key.go +++ b/internal/service/kms/key.go @@ -52,6 +52,12 @@ func ResourceKey() *schema.Resource { ForceNew: true, ValidateFunc: validation.StringLenBetween(1, 22), }, + "xks_key_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 128), + }, "customer_master_key_spec": { Type: schema.TypeString, Optional: true, @@ -143,7 +149,15 @@ func resourceKeyCreate(ctx context.Context, d *schema.ResourceData, meta interfa input.Policy = aws.String(v.(string)) } - if v, ok := d.GetOk("custom_key_store_id"); ok { + if v, ok := d.GetOk("xks_key_id"); ok { + if _, ok := d.GetOk("custom_key_store_id"); !ok { + return sdkdiag.AppendErrorf(diags, "custom_key_store_id must be set when xks_key_id is set") + } + + input.Origin = aws.String(kms.OriginTypeExternalKeyStore) + input.CustomKeyStoreId = aws.String(d.Get("custom_key_store_id").(string)) + input.XksKeyId = aws.String(v.(string)) + } else if v, ok := d.GetOk("custom_key_store_id"); ok { input.Origin = aws.String(kms.OriginTypeAwsCloudhsm) input.CustomKeyStoreId = aws.String(v.(string)) } @@ -212,6 +226,7 @@ func resourceKeyRead(ctx context.Context, d *schema.ResourceData, meta interface d.Set("arn", key.metadata.Arn) d.Set("custom_key_store_id", key.metadata.CustomKeyStoreId) + d.Set("xks_key_id", key.metadata.XksKeyConfiguration.Id) d.Set("customer_master_key_spec", key.metadata.CustomerMasterKeySpec) d.Set("description", key.metadata.Description) d.Set("enable_key_rotation", key.rotation) From 73607614efe4e5449c743be32503b23c243d19f5 Mon Sep 17 00:00:00 2001 From: Albin Gustavsson Date: Fri, 5 May 2023 16:24:35 +0200 Subject: [PATCH 010/438] r/aws_kms_key: Fix SIGSEGV when xks_key_id is 
unset --- internal/service/kms/key.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/internal/service/kms/key.go b/internal/service/kms/key.go index e7659f7f4bb..9d231d893e0 100644 --- a/internal/service/kms/key.go +++ b/internal/service/kms/key.go @@ -226,7 +226,6 @@ func resourceKeyRead(ctx context.Context, d *schema.ResourceData, meta interface d.Set("arn", key.metadata.Arn) d.Set("custom_key_store_id", key.metadata.CustomKeyStoreId) - d.Set("xks_key_id", key.metadata.XksKeyConfiguration.Id) d.Set("customer_master_key_spec", key.metadata.CustomerMasterKeySpec) d.Set("description", key.metadata.Description) d.Set("enable_key_rotation", key.rotation) @@ -235,6 +234,12 @@ func resourceKeyRead(ctx context.Context, d *schema.ResourceData, meta interface d.Set("key_usage", key.metadata.KeyUsage) d.Set("multi_region", key.metadata.MultiRegion) + if key.metadata.XksKeyConfiguration != nil { + d.Set("xks_key_id", key.metadata.XksKeyConfiguration.Id) + } else { + d.Set("xks_key_id", nil) + } + policyToSet, err := verify.PolicyToSet(d.Get("policy").(string), key.policy) if err != nil { return sdkdiag.AppendErrorf(diags, "while setting policy (%s), encountered: %s", key.policy, err) From 858e8783300a29408c361305856b1c33e041613f Mon Sep 17 00:00:00 2001 From: Tim Rogers Date: Tue, 16 May 2023 17:17:51 -0500 Subject: [PATCH 011/438] Modified target_group.go to persist stickiness.app_cookie.cookie_name through updates between lb_cookie and app_cookie --- internal/service/elbv2/target_group.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index 68fdbaecb97..4c33692be4a 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -752,6 +752,10 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta &elbv2.TargetGroupAttribute{ Key: aws.String("stickiness.lb_cookie.duration_seconds"), Value: 
aws.String(fmt.Sprintf("%d", stickiness["cookie_duration"].(int))), + }, + &elbv2.TargetGroupAttribute{ + Key: aws.String("stickiness.app_cookie.cookie_name"), + Value: aws.String(stickiness["cookie_name"].(string)), }) case "app_cookie": attrs = append(attrs, From 27d24c1fd0a2dfdac9366629eec73ac43024ccb5 Mon Sep 17 00:00:00 2001 From: Tim Rogers Date: Tue, 16 May 2023 17:30:12 -0500 Subject: [PATCH 012/438] Added acceptance test for changing ALB stickiness type --- internal/service/elbv2/target_group_test.go | 93 +++++++++++++++++++++ 1 file changed, 93 insertions(+) diff --git a/internal/service/elbv2/target_group_test.go b/internal/service/elbv2/target_group_test.go index 97f5a07c2a6..05f5e10435c 100644 --- a/internal/service/elbv2/target_group_test.go +++ b/internal/service/elbv2/target_group_test.go @@ -1450,6 +1450,99 @@ func TestAccELBV2TargetGroup_Stickiness_updateAppEnabled(t *testing.T) { }) } +func TestAccELBV2TargetGroup_Stickiness_updateStickinessType(t *testing.T) { + ctx := acctest.Context(t) + var conf elbv2.TargetGroup + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_lb_target_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, elbv2.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTargetGroupDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTargetGroupConfig_stickiness(rName, true, true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTargetGroupExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "port", "443"), + resource.TestCheckResourceAttr(resourceName, "protocol", "HTTPS"), + resource.TestCheckResourceAttrSet(resourceName, "vpc_id"), + resource.TestCheckResourceAttr(resourceName, 
"deregistration_delay", "200"), + resource.TestCheckResourceAttr(resourceName, "stickiness.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.type", "lb_cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_duration", "10000"), + resource.TestCheckResourceAttr(resourceName, "health_check.#", "1"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.path", "/health2"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.interval", "30"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.port", "8082"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.protocol", "HTTPS"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.timeout", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.healthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.unhealthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.matcher", "200"), + ), + }, + { + Config: testAccTargetGroupConfig_appStickiness(rName, true, true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTargetGroupExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "port", "443"), + resource.TestCheckResourceAttr(resourceName, "protocol", "HTTPS"), + resource.TestCheckResourceAttrSet(resourceName, "vpc_id"), + resource.TestCheckResourceAttr(resourceName, "deregistration_delay", "200"), + resource.TestCheckResourceAttr(resourceName, "stickiness.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.type", "app_cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_name", "Cookie"), + 
resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_duration", "10000"), + resource.TestCheckResourceAttr(resourceName, "health_check.#", "1"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.path", "/health2"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.interval", "30"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.port", "8082"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.protocol", "HTTPS"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.timeout", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.healthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.unhealthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.matcher", "200"), + ), + }, + { + Config: testAccTargetGroupConfig_stickiness(rName, true, true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTargetGroupExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "port", "443"), + resource.TestCheckResourceAttr(resourceName, "protocol", "HTTPS"), + resource.TestCheckResourceAttrSet(resourceName, "vpc_id"), + resource.TestCheckResourceAttr(resourceName, "deregistration_delay", "200"), + resource.TestCheckResourceAttr(resourceName, "stickiness.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.type", "lb_cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_name", "Cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_duration", "10000"), + resource.TestCheckResourceAttr(resourceName, "health_check.#", "1"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.path", "/health2"), + 
resource.TestCheckResourceAttr(resourceName, "health_check.0.interval", "30"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.port", "8082"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.protocol", "HTTPS"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.timeout", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.healthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.unhealthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.matcher", "200"), + ), + }, + }, + }) +} + func TestAccELBV2TargetGroup_HealthCheck_update(t *testing.T) { ctx := acctest.Context(t) var conf elbv2.TargetGroup From 2655cd5f917dd108f47df755ecffc2aa200bf903 Mon Sep 17 00:00:00 2001 From: Tim Rogers Date: Tue, 16 May 2023 17:39:44 -0500 Subject: [PATCH 013/438] Added changelog file 31436.txt --- .changelog/31436.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/31436.txt diff --git a/.changelog/31436.txt b/.changelog/31436.txt new file mode 100644 index 00000000000..c36d19086b4 --- /dev/null +++ b/.changelog/31436.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_lb_target_group: Persist `stickiness.app_cookie.cookie_name` across changes between app_cookie and lb_cookie ALB stickiness +``` \ No newline at end of file From d2bcc3f781d7be13b9b9b05d3afe91766002868f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?CHAUVET=2C=20J=C3=A9r=C3=A9my=20=28EH=3AGROUP=29?= Date: Mon, 12 Jun 2023 22:42:44 +0200 Subject: [PATCH 014/438] feat: use RFC3339 date for cdc_start_time --- internal/service/dms/replication_task.go | 24 ++++---- internal/service/dms/replication_task_test.go | 55 ++++++++++++++++++- .../docs/r/dms_replication_task.html.markdown | 6 +- 3 files changed, 67 insertions(+), 18 deletions(-) diff --git a/internal/service/dms/replication_task.go b/internal/service/dms/replication_task.go index 81740fae5ea..b9bf75963b8 100644 --- 
a/internal/service/dms/replication_task.go +++ b/internal/service/dms/replication_task.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "log" - "strconv" "time" "github.com/aws/aws-sdk-go/aws" @@ -43,9 +42,9 @@ func ResourceReplicationTask() *schema.Resource { ConflictsWith: []string{"cdc_start_time"}, }, "cdc_start_time": { - Type: schema.TypeString, - Optional: true, - // Requires a Unix timestamp in seconds. Example 1484346880 + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.IsRFC3339Time, ConflictsWith: []string{"cdc_start_position"}, }, "migration_type": { @@ -135,11 +134,9 @@ func resourceReplicationTaskCreate(ctx context.Context, d *schema.ResourceData, } if v, ok := d.GetOk("cdc_start_time"); ok { - seconds, err := strconv.ParseInt(v.(string), 10, 64) - if err != nil { - return sdkdiag.AppendErrorf(diags, "DMS create replication task. Invalid CDC Unix timestamp: %s", err) - } - request.CdcStartTime = aws.Time(time.Unix(seconds, 0)) + // Parse the RFC3339 date string into a time.Time object + dateTime, _ := time.Parse(time.RFC3339, v.(string)) + request.CdcStartTime = aws.Time(dateTime) } if v, ok := d.GetOk("replication_task_settings"); ok { @@ -224,11 +221,14 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, } if d.HasChange("cdc_start_time") { - seconds, err := strconv.ParseInt(d.Get("cdc_start_time").(string), 10, 64) + // Parse the RFC3339 date string into a time.Time object + dateTime, err := time.Parse(time.RFC3339, d.Get("cdc_start_time").(string)) + if err != nil { - return sdkdiag.AppendErrorf(diags, "DMS update replication task. Invalid CRC Unix timestamp: %s", err) + return sdkdiag.AppendErrorf(diags, "DMS update replication task. 
Invalid cdc_start_time value: %s", err) } - input.CdcStartTime = aws.Time(time.Unix(seconds, 0)) + + input.CdcStartTime = aws.Time(dateTime) } if d.HasChange("replication_task_settings") { diff --git a/internal/service/dms/replication_task_test.go b/internal/service/dms/replication_task_test.go index 141884dd57c..0e73f5563b9 100644 --- a/internal/service/dms/replication_task_test.go +++ b/internal/service/dms/replication_task_test.go @@ -3,9 +3,6 @@ package dms_test import ( "context" "fmt" - "regexp" - "testing" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -14,6 +11,10 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" tfdms "github.com/hashicorp/terraform-provider-aws/internal/service/dms" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "regexp" + "strings" + "testing" + "time" ) func TestAccDMSReplicationTask_basic(t *testing.T) { @@ -179,6 +180,37 @@ func TestAccDMSReplicationTask_cdcStartPosition(t *testing.T) { }) } +func TestAccDMSReplicationTask_cdcStartTime(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_dms_replication_task.test" + + currentTime := time.Now().UTC() + rfc3339Time := currentTime.Format(time.RFC3339) + awsDmsExpectedOutput := strings.TrimRight(rfc3339Time, "Z") // AWS API drop "Z" part. 
+ + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, dms.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckReplicationTaskDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccReplicationTaskConfig_cdcStartTime(rName, rfc3339Time), + Check: resource.ComposeTestCheckFunc( + testAccCheckReplicationTaskExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "cdc_start_position", awsDmsExpectedOutput), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerifyIgnore: []string{"start_replication_task"}, + }, + }, + }) +} + func TestAccDMSReplicationTask_startReplicationTask(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { @@ -462,6 +494,23 @@ resource "aws_dms_replication_task" "test" { `, cdcStartPosition, rName)) } +func testAccReplicationTaskConfig_cdcStartTime(rName, cdcStartPosition string) string { + return acctest.ConfigCompose( + replicationTaskConfigBase(rName), + fmt.Sprintf(` +resource "aws_dms_replication_task" "test" { + cdc_start_time = %[1]q + migration_type = "cdc" + replication_instance_arn = aws_dms_replication_instance.test.replication_instance_arn + replication_task_id = %[2]q + replication_task_settings = 
"{\"BeforeImageSettings\":null,\"FailTaskWhenCleanTaskResourceFailed\":false,\"ChangeProcessingDdlHandlingPolicy\":{\"HandleSourceTableAltered\":true,\"HandleSourceTableDropped\":true,\"HandleSourceTableTruncated\":true},\"ChangeProcessingTuning\":{\"BatchApplyMemoryLimit\":500,\"BatchApplyPreserveTransaction\":true,\"BatchApplyTimeoutMax\":30,\"BatchApplyTimeoutMin\":1,\"BatchSplitSize\":0,\"CommitTimeout\":1,\"MemoryKeepTime\":60,\"MemoryLimitTotal\":1024,\"MinTransactionSize\":1000,\"StatementCacheSize\":50},\"CharacterSetSettings\":null,\"ControlTablesSettings\":{\"ControlSchema\":\"\",\"FullLoadExceptionTableEnabled\":false,\"HistoryTableEnabled\":false,\"HistoryTimeslotInMinutes\":5,\"StatusTableEnabled\":false,\"SuspendedTablesTableEnabled\":false},\"ErrorBehavior\":{\"ApplyErrorDeletePolicy\":\"IGNORE_RECORD\",\"ApplyErrorEscalationCount\":0,\"ApplyErrorEscalationPolicy\":\"LOG_ERROR\",\"ApplyErrorFailOnTruncationDdl\":false,\"ApplyErrorInsertPolicy\":\"LOG_ERROR\",\"ApplyErrorUpdatePolicy\":\"LOG_ERROR\",\"DataErrorEscalationCount\":0,\"DataErrorEscalationPolicy\":\"SUSPEND_TABLE\",\"DataErrorPolicy\":\"LOG_ERROR\",\"DataTruncationErrorPolicy\":\"LOG_ERROR\",\"EventErrorPolicy\":\"IGNORE\",\"FailOnNoTablesCaptured\":false,\"FailOnTransactionConsistencyBreached\":false,\"FullLoadIgnoreConflicts\":true,\"RecoverableErrorCount\":-1,\"RecoverableErrorInterval\":5,\"RecoverableErrorStopRetryAfterThrottlingMax\":false,\"RecoverableErrorThrottling\":true,\"RecoverableErrorThrottlingMax\":1800,\"TableErrorEscalationCount\":0,\"TableErrorEscalationPolicy\":\"STOP_TASK\",\"TableErrorPolicy\":\"SUSPEND_TABLE\"},\"FullLoadSettings\":{\"CommitRate\":10000,\"CreatePkAfterFullLoad\":false,\"MaxFullLoadSubTasks\":8,\"StopTaskCachedChangesApplied\":false,\"StopTaskCachedChangesNotApplied\":false,\"TargetTablePrepMode\":\"DROP_AND_CREATE\",\"TransactionConsistencyTimeout\":600},\"Logging\":{\"EnableLogging\":false,\"LogComponents\":[{\"Id\":\"TRANSFORMATION\",\"Severity\":\"
LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_UNLOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"IO\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_LOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"PERFORMANCE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_CAPTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SORTER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"REST_SERVER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"VALIDATOR_EXT\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_APPLY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TASK_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TABLES_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"METADATA_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_FACTORY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMON\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"ADDONS\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"DATA_STRUCTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMUNICATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_TRANSFER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"}]},\"LoopbackPreventionSettings\":null,\"PostProcessingRules\":null,\"StreamBufferSettings\":{\"CtrlStreamBufferSizeInMB\":5,\"StreamBufferCount\":3,\"StreamBufferSizeInMB\":8},\"TargetMetadata\":{\"BatchApplyEnabled\":false,\"FullLobMode\":false,\"InlineLobMaxSize\":0,\"LimitedSizeLobMode\":true,\"LoadMaxFileSize\":0,\"LobChunkSize\":0,\"LobMaxSize\":32,\"ParallelApplyBufferSize\":0,\"ParallelApplyQueuesPerThread\":0,\"ParallelApplyThreads\":0,\"ParallelLoadBufferSize\":0,\"ParallelLoadQueuesPerThread\":0,\"ParallelLoadThreads\":0,\"SupportLobs\":true,\"TargetSchema\":\"\",\"TaskRecoveryTableEnabled\":false},\"TTSettings\":{\"EnableTT\":false,\"TTRecordSettings\":null,\"TTS3Settings\":null}}" + source_endpoint_arn = aws_dms_endpoint.source.endpoint_arn + table_mappings = 
"{\"rules\":[{\"rule-type\":\"selection\",\"rule-id\":\"1\",\"rule-name\":\"1\",\"object-locator\":{\"schema-name\":\"%%\",\"table-name\":\"%%\"},\"rule-action\":\"include\"}]}" + target_endpoint_arn = aws_dms_endpoint.target.endpoint_arn +} +`, cdcStartPosition, rName)) +} + func testAccReplicationTaskConfig_start(rName string, startTask bool, ruleName string) string { return acctest.ConfigCompose( acctest.ConfigAvailableAZsNoOptIn(), diff --git a/website/docs/r/dms_replication_task.html.markdown b/website/docs/r/dms_replication_task.html.markdown index 35f486cbbd7..01726a887e5 100644 --- a/website/docs/r/dms_replication_task.html.markdown +++ b/website/docs/r/dms_replication_task.html.markdown @@ -17,7 +17,7 @@ Provides a DMS (Data Migration Service) replication task resource. DMS replicati ```terraform # Create a new replication task resource "aws_dms_replication_task" "test" { - cdc_start_time = 1484346880 + cdc_start_time = "2023-06-09T18:27:12Z" migration_type = "full-load" replication_instance_arn = aws_dms_replication_instance.test-dms-replication-instance-tf.replication_instance_arn replication_task_id = "test-dms-replication-task-tf" @@ -37,8 +37,8 @@ resource "aws_dms_replication_task" "test" { The following arguments are supported: -* `cdc_start_position` - (Optional, Conflicts with `cdc_start_time`) Indicates when you want a change data capture (CDC) operation to start. The value can be in date, checkpoint, or LSN/SCN format depending on the source engine. For more information, see [Determining a CDC native start point](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Task.CDC.html#CHAP_Task.CDC.StartPoint.Native). -* `cdc_start_time` - (Optional, Conflicts with `cdc_start_position`) The Unix timestamp integer for the start of the Change Data Capture (CDC) operation. +* `cdc_start_position` - (Optional, Conflicts with `cdc_start_time`) Indicates when you want a change data capture (CDC) operation to start. 
The value can be a RFC3339 formatted date, a checkpoint, or a LSN/SCN format depending on the source engine. For more information, see [Determining a CDC native start point](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Task.CDC.html#CHAP_Task.CDC.StartPoint.Native). +* `cdc_start_time` - (Optional, Conflicts with `cdc_start_position`) RFC3339 formatted date string for the start of the Change Data Capture (CDC) operation. * `migration_type` - (Required) The migration type. Can be one of `full-load | cdc | full-load-and-cdc`. * `replication_instance_arn` - (Required) The Amazon Resource Name (ARN) of the replication instance. * `replication_task_id` - (Required) The replication task identifier. From 006d5546d1e9c4d030c7a336c1e60b5000ae1497 Mon Sep 17 00:00:00 2001 From: Jeremy Chauvet Date: Mon, 12 Jun 2023 22:42:44 +0200 Subject: [PATCH 015/438] feat: use RFC3339 date for cdc_start_time --- internal/service/dms/replication_task.go | 24 ++++---- internal/service/dms/replication_task_test.go | 55 ++++++++++++++++++- .../docs/r/dms_replication_task.html.markdown | 6 +- 3 files changed, 67 insertions(+), 18 deletions(-) diff --git a/internal/service/dms/replication_task.go b/internal/service/dms/replication_task.go index 81740fae5ea..b9bf75963b8 100644 --- a/internal/service/dms/replication_task.go +++ b/internal/service/dms/replication_task.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "log" - "strconv" "time" "github.com/aws/aws-sdk-go/aws" @@ -43,9 +42,9 @@ func ResourceReplicationTask() *schema.Resource { ConflictsWith: []string{"cdc_start_time"}, }, "cdc_start_time": { - Type: schema.TypeString, - Optional: true, - // Requires a Unix timestamp in seconds. 
Example 1484346880 + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.IsRFC3339Time, ConflictsWith: []string{"cdc_start_position"}, }, "migration_type": { @@ -135,11 +134,9 @@ func resourceReplicationTaskCreate(ctx context.Context, d *schema.ResourceData, } if v, ok := d.GetOk("cdc_start_time"); ok { - seconds, err := strconv.ParseInt(v.(string), 10, 64) - if err != nil { - return sdkdiag.AppendErrorf(diags, "DMS create replication task. Invalid CDC Unix timestamp: %s", err) - } - request.CdcStartTime = aws.Time(time.Unix(seconds, 0)) + // Parse the RFC3339 date string into a time.Time object + dateTime, _ := time.Parse(time.RFC3339, v.(string)) + request.CdcStartTime = aws.Time(dateTime) } if v, ok := d.GetOk("replication_task_settings"); ok { @@ -224,11 +221,14 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, } if d.HasChange("cdc_start_time") { - seconds, err := strconv.ParseInt(d.Get("cdc_start_time").(string), 10, 64) + // Parse the RFC3339 date string into a time.Time object + dateTime, err := time.Parse(time.RFC3339, d.Get("cdc_start_time").(string)) + if err != nil { - return sdkdiag.AppendErrorf(diags, "DMS update replication task. Invalid CRC Unix timestamp: %s", err) + return sdkdiag.AppendErrorf(diags, "DMS update replication task. 
Invalid cdc_start_time value: %s", err) } - input.CdcStartTime = aws.Time(time.Unix(seconds, 0)) + + input.CdcStartTime = aws.Time(dateTime) } if d.HasChange("replication_task_settings") { diff --git a/internal/service/dms/replication_task_test.go b/internal/service/dms/replication_task_test.go index 141884dd57c..0e73f5563b9 100644 --- a/internal/service/dms/replication_task_test.go +++ b/internal/service/dms/replication_task_test.go @@ -3,9 +3,6 @@ package dms_test import ( "context" "fmt" - "regexp" - "testing" - dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -14,6 +11,10 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" tfdms "github.com/hashicorp/terraform-provider-aws/internal/service/dms" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "regexp" + "strings" + "testing" + "time" ) func TestAccDMSReplicationTask_basic(t *testing.T) { @@ -179,6 +180,37 @@ func TestAccDMSReplicationTask_cdcStartPosition(t *testing.T) { }) } +func TestAccDMSReplicationTask_cdcStartTime(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_dms_replication_task.test" + + currentTime := time.Now().UTC() + rfc3339Time := currentTime.Format(time.RFC3339) + awsDmsExpectedOutput := strings.TrimRight(rfc3339Time, "Z") // AWS API drop "Z" part. 
+ + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, dms.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckReplicationTaskDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccReplicationTaskConfig_cdcStartTime(rName, rfc3339Time), + Check: resource.ComposeTestCheckFunc( + testAccCheckReplicationTaskExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "cdc_start_position", awsDmsExpectedOutput), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerifyIgnore: []string{"start_replication_task"}, + }, + }, + }) +} + func TestAccDMSReplicationTask_startReplicationTask(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { @@ -462,6 +494,23 @@ resource "aws_dms_replication_task" "test" { `, cdcStartPosition, rName)) } +func testAccReplicationTaskConfig_cdcStartTime(rName, cdcStartPosition string) string { + return acctest.ConfigCompose( + replicationTaskConfigBase(rName), + fmt.Sprintf(` +resource "aws_dms_replication_task" "test" { + cdc_start_time = %[1]q + migration_type = "cdc" + replication_instance_arn = aws_dms_replication_instance.test.replication_instance_arn + replication_task_id = %[2]q + replication_task_settings = 
"{\"BeforeImageSettings\":null,\"FailTaskWhenCleanTaskResourceFailed\":false,\"ChangeProcessingDdlHandlingPolicy\":{\"HandleSourceTableAltered\":true,\"HandleSourceTableDropped\":true,\"HandleSourceTableTruncated\":true},\"ChangeProcessingTuning\":{\"BatchApplyMemoryLimit\":500,\"BatchApplyPreserveTransaction\":true,\"BatchApplyTimeoutMax\":30,\"BatchApplyTimeoutMin\":1,\"BatchSplitSize\":0,\"CommitTimeout\":1,\"MemoryKeepTime\":60,\"MemoryLimitTotal\":1024,\"MinTransactionSize\":1000,\"StatementCacheSize\":50},\"CharacterSetSettings\":null,\"ControlTablesSettings\":{\"ControlSchema\":\"\",\"FullLoadExceptionTableEnabled\":false,\"HistoryTableEnabled\":false,\"HistoryTimeslotInMinutes\":5,\"StatusTableEnabled\":false,\"SuspendedTablesTableEnabled\":false},\"ErrorBehavior\":{\"ApplyErrorDeletePolicy\":\"IGNORE_RECORD\",\"ApplyErrorEscalationCount\":0,\"ApplyErrorEscalationPolicy\":\"LOG_ERROR\",\"ApplyErrorFailOnTruncationDdl\":false,\"ApplyErrorInsertPolicy\":\"LOG_ERROR\",\"ApplyErrorUpdatePolicy\":\"LOG_ERROR\",\"DataErrorEscalationCount\":0,\"DataErrorEscalationPolicy\":\"SUSPEND_TABLE\",\"DataErrorPolicy\":\"LOG_ERROR\",\"DataTruncationErrorPolicy\":\"LOG_ERROR\",\"EventErrorPolicy\":\"IGNORE\",\"FailOnNoTablesCaptured\":false,\"FailOnTransactionConsistencyBreached\":false,\"FullLoadIgnoreConflicts\":true,\"RecoverableErrorCount\":-1,\"RecoverableErrorInterval\":5,\"RecoverableErrorStopRetryAfterThrottlingMax\":false,\"RecoverableErrorThrottling\":true,\"RecoverableErrorThrottlingMax\":1800,\"TableErrorEscalationCount\":0,\"TableErrorEscalationPolicy\":\"STOP_TASK\",\"TableErrorPolicy\":\"SUSPEND_TABLE\"},\"FullLoadSettings\":{\"CommitRate\":10000,\"CreatePkAfterFullLoad\":false,\"MaxFullLoadSubTasks\":8,\"StopTaskCachedChangesApplied\":false,\"StopTaskCachedChangesNotApplied\":false,\"TargetTablePrepMode\":\"DROP_AND_CREATE\",\"TransactionConsistencyTimeout\":600},\"Logging\":{\"EnableLogging\":false,\"LogComponents\":[{\"Id\":\"TRANSFORMATION\",\"Severity\":\"
LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_UNLOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"IO\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_LOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"PERFORMANCE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_CAPTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SORTER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"REST_SERVER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"VALIDATOR_EXT\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_APPLY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TASK_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TABLES_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"METADATA_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_FACTORY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMON\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"ADDONS\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"DATA_STRUCTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMUNICATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_TRANSFER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"}]},\"LoopbackPreventionSettings\":null,\"PostProcessingRules\":null,\"StreamBufferSettings\":{\"CtrlStreamBufferSizeInMB\":5,\"StreamBufferCount\":3,\"StreamBufferSizeInMB\":8},\"TargetMetadata\":{\"BatchApplyEnabled\":false,\"FullLobMode\":false,\"InlineLobMaxSize\":0,\"LimitedSizeLobMode\":true,\"LoadMaxFileSize\":0,\"LobChunkSize\":0,\"LobMaxSize\":32,\"ParallelApplyBufferSize\":0,\"ParallelApplyQueuesPerThread\":0,\"ParallelApplyThreads\":0,\"ParallelLoadBufferSize\":0,\"ParallelLoadQueuesPerThread\":0,\"ParallelLoadThreads\":0,\"SupportLobs\":true,\"TargetSchema\":\"\",\"TaskRecoveryTableEnabled\":false},\"TTSettings\":{\"EnableTT\":false,\"TTRecordSettings\":null,\"TTS3Settings\":null}}" + source_endpoint_arn = aws_dms_endpoint.source.endpoint_arn + table_mappings = 
"{\"rules\":[{\"rule-type\":\"selection\",\"rule-id\":\"1\",\"rule-name\":\"1\",\"object-locator\":{\"schema-name\":\"%%\",\"table-name\":\"%%\"},\"rule-action\":\"include\"}]}" + target_endpoint_arn = aws_dms_endpoint.target.endpoint_arn +} +`, cdcStartPosition, rName)) +} + func testAccReplicationTaskConfig_start(rName string, startTask bool, ruleName string) string { return acctest.ConfigCompose( acctest.ConfigAvailableAZsNoOptIn(), diff --git a/website/docs/r/dms_replication_task.html.markdown b/website/docs/r/dms_replication_task.html.markdown index 35f486cbbd7..01726a887e5 100644 --- a/website/docs/r/dms_replication_task.html.markdown +++ b/website/docs/r/dms_replication_task.html.markdown @@ -17,7 +17,7 @@ Provides a DMS (Data Migration Service) replication task resource. DMS replicati ```terraform # Create a new replication task resource "aws_dms_replication_task" "test" { - cdc_start_time = 1484346880 + cdc_start_time = "2023-06-09T18:27:12Z" migration_type = "full-load" replication_instance_arn = aws_dms_replication_instance.test-dms-replication-instance-tf.replication_instance_arn replication_task_id = "test-dms-replication-task-tf" @@ -37,8 +37,8 @@ resource "aws_dms_replication_task" "test" { The following arguments are supported: -* `cdc_start_position` - (Optional, Conflicts with `cdc_start_time`) Indicates when you want a change data capture (CDC) operation to start. The value can be in date, checkpoint, or LSN/SCN format depending on the source engine. For more information, see [Determining a CDC native start point](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Task.CDC.html#CHAP_Task.CDC.StartPoint.Native). -* `cdc_start_time` - (Optional, Conflicts with `cdc_start_position`) The Unix timestamp integer for the start of the Change Data Capture (CDC) operation. +* `cdc_start_position` - (Optional, Conflicts with `cdc_start_time`) Indicates when you want a change data capture (CDC) operation to start. 
The value can be a RFC3339 formatted date, a checkpoint, or a LSN/SCN format depending on the source engine. For more information, see [Determining a CDC native start point](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Task.CDC.html#CHAP_Task.CDC.StartPoint.Native). +* `cdc_start_time` - (Optional, Conflicts with `cdc_start_position`) RFC3339 formatted date string for the start of the Change Data Capture (CDC) operation. * `migration_type` - (Required) The migration type. Can be one of `full-load | cdc | full-load-and-cdc`. * `replication_instance_arn` - (Required) The Amazon Resource Name (ARN) of the replication instance. * `replication_task_id` - (Required) The replication task identifier. From 2472e542365cca84ee897d7ff71eb4f0dab0349e Mon Sep 17 00:00:00 2001 From: Jeremy Chauvet Date: Mon, 12 Jun 2023 23:33:47 +0200 Subject: [PATCH 016/438] feat: use RFC3339 date for cdc_start_time --- .changelog/31917.txt | 3 +++ website/docs/r/dms_replication_task.html.markdown | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 .changelog/31917.txt diff --git a/.changelog/31917.txt b/.changelog/31917.txt new file mode 100644 index 00000000000..df12fe538a1 --- /dev/null +++ b/.changelog/31917.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_dms_replication_task: cdc_start_time parameter now use RFC3339 formatted date instead of UNIX timestamp. +``` \ No newline at end of file diff --git a/website/docs/r/dms_replication_task.html.markdown b/website/docs/r/dms_replication_task.html.markdown index 01726a887e5..77a4860d2f4 100644 --- a/website/docs/r/dms_replication_task.html.markdown +++ b/website/docs/r/dms_replication_task.html.markdown @@ -17,7 +17,7 @@ Provides a DMS (Data Migration Service) replication task resource. 
DMS replicati ```terraform # Create a new replication task resource "aws_dms_replication_task" "test" { - cdc_start_time = "2023-06-09T18:27:12Z" + cdc_start_time = "1993-05-21T05:50:00Z" migration_type = "full-load" replication_instance_arn = aws_dms_replication_instance.test-dms-replication-instance-tf.replication_instance_arn replication_task_id = "test-dms-replication-task-tf" From d26698b61a0c8e151a3c4b3796c2153f7a61a329 Mon Sep 17 00:00:00 2001 From: Jeremy Chauvet Date: Tue, 13 Jun 2023 09:21:14 +0200 Subject: [PATCH 017/438] feat: allow RFC3339 date for cdc_start_time --- .changelog/31917.txt | 2 +- internal/service/dms/replication_task.go | 2 +- website/docs/r/dms_replication_task.html.markdown | 8 ++++++-- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git a/.changelog/31917.txt b/.changelog/31917.txt index df12fe538a1..b6f0cbda065 100644 --- a/.changelog/31917.txt +++ b/.changelog/31917.txt @@ -1,3 +1,3 @@ ```release-note:enhancement -resource/aws_dms_replication_task: cdc_start_time parameter now use RFC3339 formatted date instead of UNIX timestamp. +resource/aws_dms_replication_task: allow cdc_start_time parameter to use RFC3339 formatted date in addition to a UNIX timestamp. 
``` \ No newline at end of file diff --git a/internal/service/dms/replication_task.go b/internal/service/dms/replication_task.go index b9bf75963b8..89a038f3b26 100644 --- a/internal/service/dms/replication_task.go +++ b/internal/service/dms/replication_task.go @@ -44,7 +44,7 @@ func ResourceReplicationTask() *schema.Resource { "cdc_start_time": { Type: schema.TypeString, Optional: true, - ValidateFunc: validation.IsRFC3339Time, + ValidateFunc: verify.ValidStringDateOrPositiveInt, ConflictsWith: []string{"cdc_start_position"}, }, "migration_type": { diff --git a/website/docs/r/dms_replication_task.html.markdown b/website/docs/r/dms_replication_task.html.markdown index 77a4860d2f4..35e672c6905 100644 --- a/website/docs/r/dms_replication_task.html.markdown +++ b/website/docs/r/dms_replication_task.html.markdown @@ -37,8 +37,12 @@ resource "aws_dms_replication_task" "test" { The following arguments are supported: -* `cdc_start_position` - (Optional, Conflicts with `cdc_start_time`) Indicates when you want a change data capture (CDC) operation to start. The value can be a RFC3339 formatted date, a checkpoint, or a LSN/SCN format depending on the source engine. For more information, see [Determining a CDC native start point](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Task.CDC.html#CHAP_Task.CDC.StartPoint.Native). -* `cdc_start_time` - (Optional, Conflicts with `cdc_start_position`) RFC3339 formatted date string for the start of the Change Data Capture (CDC) operation. +* `cdc_start_position` - (Optional, Conflicts with `cdc_start_time`) Indicates when you want a change data capture (CDC) + operation to start. The value can be a RFC3339 formatted date, a checkpoint, or a LSN/SCN format depending on the + source engine. For more information, + see [Determining a CDC native start point](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Task.CDC.html#CHAP_Task.CDC.StartPoint.Native). 
+* `cdc_start_time` - (Optional, Conflicts with `cdc_start_position`) RFC3339 formatted date string or UNIX timestamp for + the start of the Change Data Capture (CDC) operation. * `migration_type` - (Required) The migration type. Can be one of `full-load | cdc | full-load-and-cdc`. * `replication_instance_arn` - (Required) The Amazon Resource Name (ARN) of the replication instance. * `replication_task_id` - (Required) The replication task identifier. From 6ab260e1e5ace4a802f78da379b2c3ab61094e2a Mon Sep 17 00:00:00 2001 From: Jeremy Chauvet Date: Tue, 13 Jun 2023 18:00:29 +0200 Subject: [PATCH 018/438] feat: allow RFC3339 date for cdc_start_time --- internal/service/dms/replication_task.go | 17 +++++++-- internal/service/dms/replication_task_test.go | 36 ++++++++++++++++++- 2 files changed, 49 insertions(+), 4 deletions(-) diff --git a/internal/service/dms/replication_task.go b/internal/service/dms/replication_task.go index 89a038f3b26..f5a384bcd75 100644 --- a/internal/service/dms/replication_task.go +++ b/internal/service/dms/replication_task.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "log" + "strconv" "time" "github.com/aws/aws-sdk-go/aws" @@ -134,9 +135,19 @@ func resourceReplicationTaskCreate(ctx context.Context, d *schema.ResourceData, } if v, ok := d.GetOk("cdc_start_time"); ok { - // Parse the RFC3339 date string into a time.Time object - dateTime, _ := time.Parse(time.RFC3339, v.(string)) - request.CdcStartTime = aws.Time(dateTime) + // Check if input is RFC3339 date string or UNIX timestamp. + dateTime, err := time.Parse(time.RFC3339, v.(string)) + + if err != nil { + // Not a valid RF3339 date, checking if this is a UNIX timestamp. + seconds, err := strconv.ParseInt(v.(string), 10, 64) + if err != nil { + return sdkdiag.AppendErrorf(diags, "DMS create replication task. 
Invalid Unix timestamp given for cdc_start_time parameter: %s", err) + } + request.CdcStartTime = aws.Time(time.Unix(seconds, 0)) + } else { + request.CdcStartTime = aws.Time(dateTime) + } } if v, ok := d.GetOk("replication_task_settings"); ok { diff --git a/internal/service/dms/replication_task_test.go b/internal/service/dms/replication_task_test.go index 0e73f5563b9..79da6d7c047 100644 --- a/internal/service/dms/replication_task_test.go +++ b/internal/service/dms/replication_task_test.go @@ -12,6 +12,7 @@ import ( tfdms "github.com/hashicorp/terraform-provider-aws/internal/service/dms" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "regexp" + "strconv" "strings" "testing" "time" @@ -180,7 +181,7 @@ func TestAccDMSReplicationTask_cdcStartPosition(t *testing.T) { }) } -func TestAccDMSReplicationTask_cdcStartTime(t *testing.T) { +func TestAccDMSReplicationTask_cdcStartTime_rfc3339_date(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_dms_replication_task.test" @@ -211,6 +212,39 @@ func TestAccDMSReplicationTask_cdcStartTime(t *testing.T) { }) } +func TestAccDMSReplicationTask_cdcStartTime_unix_timestamp(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_dms_replication_task.test" + + currentTime := time.Now().UTC() + rfc3339Time := currentTime.Format(time.RFC3339) + awsDmsExpectedOutput := strings.TrimRight(rfc3339Time, "Z") // AWS API drop "Z" part. 
+ dateTime, _ := time.Parse(time.RFC3339, rfc3339Time) + unixDateTime := strconv.Itoa(int(dateTime.Unix())) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, dms.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckReplicationTaskDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccReplicationTaskConfig_cdcStartTime(rName, unixDateTime), + Check: resource.ComposeTestCheckFunc( + testAccCheckReplicationTaskExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "cdc_start_position", awsDmsExpectedOutput), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerifyIgnore: []string{"start_replication_task"}, + }, + }, + }) +} + func TestAccDMSReplicationTask_startReplicationTask(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { From f8ba86f17bd7271fb69e3a1a4951baa1b62d9e36 Mon Sep 17 00:00:00 2001 From: exoego Date: Mon, 1 May 2023 08:47:41 +0900 Subject: [PATCH 019/438] Treat "snapshotting" status as pending when creating cluster from a snapshot --- .changelog/31077.txt | 3 +++ internal/service/memorydb/wait.go | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) create mode 100644 .changelog/31077.txt diff --git a/.changelog/31077.txt b/.changelog/31077.txt new file mode 100644 index 00000000000..85512f2fd72 --- /dev/null +++ b/.changelog/31077.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_memorydb_cluster: Treat "snapshotting" status as pending when creating cluster +``` \ No newline at end of file diff --git a/internal/service/memorydb/wait.go b/internal/service/memorydb/wait.go index cba30f066ec..153d9a2ee70 100644 --- a/internal/service/memorydb/wait.go +++ b/internal/service/memorydb/wait.go @@ -57,7 +57,7 @@ func waitACLDeleted(ctx context.Context, conn *memorydb.MemoryDB, aclId string) // waitClusterAvailable waits for MemoryDB Cluster to reach an 
active state after modifications. func waitClusterAvailable(ctx context.Context, conn *memorydb.MemoryDB, clusterId string, timeout time.Duration) error { stateConf := &retry.StateChangeConf{ - Pending: []string{ClusterStatusCreating, ClusterStatusUpdating}, + Pending: []string{ClusterStatusCreating, ClusterStatusUpdating, ClusterStatusSnapshotting}, Target: []string{ClusterStatusAvailable}, Refresh: statusCluster(ctx, conn, clusterId), Timeout: timeout, From fb06c2938b28c6b42e7588ed190d48917ce87119 Mon Sep 17 00:00:00 2001 From: Vinaykumar Saraogi Date: Mon, 2 Oct 2023 20:48:08 -0400 Subject: [PATCH 020/438] Make source_type as required and not optional --- internal/service/dms/event_subscription.go | 2 +- website/docs/r/dms_event_subscription.html.markdown | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/dms/event_subscription.go b/internal/service/dms/event_subscription.go index 6e8226ba0ba..190433027de 100644 --- a/internal/service/dms/event_subscription.go +++ b/internal/service/dms/event_subscription.go @@ -79,7 +79,7 @@ func ResourceEventSubscription() *schema.Resource { }, "source_type": { Type: schema.TypeString, - Optional: true, + Required: true, // The API suppors modification but doing so loses all source_ids ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ diff --git a/website/docs/r/dms_event_subscription.html.markdown b/website/docs/r/dms_event_subscription.html.markdown index 6b8b401094b..3d53fe93610 100644 --- a/website/docs/r/dms_event_subscription.html.markdown +++ b/website/docs/r/dms_event_subscription.html.markdown @@ -34,7 +34,7 @@ This resource supports the following arguments: * `name` - (Required) Name of event subscription. * `enabled` - (Optional, Default: true) Whether the event subscription should be enabled. * `event_categories` - (Optional) List of event categories to listen for, see `DescribeEventCategories` for a canonical list. 
-* `source_type` - (Optional, Default: all events) Type of source for events. Valid values: `replication-instance` or `replication-task` +* `source_type` - (Required) Type of source for events. Valid values: `replication-instance` or `replication-task` * `source_ids` - (Required) Ids of sources to listen to. * `sns_topic_arn` - (Required) SNS topic arn to send events on. * `tags` - (Optional) Map of resource tags to assign to the resource. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. From 192d11eb4cd2253841b74d978b55aea03f229cf8 Mon Sep 17 00:00:00 2001 From: Vinaykumar Saraogi Date: Mon, 2 Oct 2023 20:48:52 -0400 Subject: [PATCH 021/438] source_ids is a required attribute --- internal/service/dms/event_subscription.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/dms/event_subscription.go b/internal/service/dms/event_subscription.go index 190433027de..994759fdf34 100644 --- a/internal/service/dms/event_subscription.go +++ b/internal/service/dms/event_subscription.go @@ -75,7 +75,7 @@ func ResourceEventSubscription() *schema.Resource { Elem: &schema.Schema{Type: schema.TypeString}, Set: schema.HashString, ForceNew: true, - Optional: true, + Required: true, }, "source_type": { Type: schema.TypeString, From 38cb72e7cdde1061e6c59e5998e1f28f258b4524 Mon Sep 17 00:00:00 2001 From: Yan Date: Thu, 2 Nov 2023 12:09:50 -0400 Subject: [PATCH 022/438] Update sqs_queue.html.markdown --- website/docs/r/sqs_queue.html.markdown | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/website/docs/r/sqs_queue.html.markdown b/website/docs/r/sqs_queue.html.markdown index 755d258154d..4457f6f6203 100644 --- a/website/docs/r/sqs_queue.html.markdown +++ b/website/docs/r/sqs_queue.html.markdown @@ -22,6 +22,7 @@ resource "aws_sqs_queue" 
"terraform_queue" { maxReceiveCount = 4 }) + tags = { Environment = "production" } @@ -52,8 +53,21 @@ resource "aws_sqs_queue" "terraform_queue" { ## Dead-letter queue ```terraform +resource "aws_sqs_queue" "terraform_queue" { + name = "terraform-example-queue" + redrive_policy = jsonencode({ + deadLetterTargetArn = aws_sqs_queue.terraform_queue_deadletter.arn + maxReceiveCount = 4 + }) +} + resource "aws_sqs_queue" "terraform_queue_deadletter" { name = "terraform-example-deadletter-queue" +} + +resource "aws_sqs_queue_redrive_allow_policy" "terraform_queue_redrive_allow_policy" { + queue_url = aws_sqs_queue.terraform_queue_deadletter.id + redrive_allow_policy = jsonencode({ redrivePermission = "byQueue", sourceQueueArns = [aws_sqs_queue.terraform_queue.arn] From 78e208c2bd6d2e8b56f2a03a5750200da8b6d426 Mon Sep 17 00:00:00 2001 From: Daniel Quackenbush <25692880+danquack@users.noreply.github.com> Date: Thu, 2 Nov 2023 18:14:10 -0400 Subject: [PATCH 023/438] d/ mq broker versions --- .../service/mq/engine_versions_data_source.go | 125 ++++++++++++++++++ .../mq/engine_versions_data_source_test.go | 48 +++++++ internal/service/mq/service_package_gen.go | 5 + .../docs/d/mq_engine_versions.html.markdown | 44 ++++++ 4 files changed, 222 insertions(+) create mode 100644 internal/service/mq/engine_versions_data_source.go create mode 100644 internal/service/mq/engine_versions_data_source_test.go create mode 100644 website/docs/d/mq_engine_versions.html.markdown diff --git a/internal/service/mq/engine_versions_data_source.go b/internal/service/mq/engine_versions_data_source.go new file mode 100644 index 00000000000..0dfd17b7805 --- /dev/null +++ b/internal/service/mq/engine_versions_data_source.go @@ -0,0 +1,125 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package mq + +import ( + "context" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/mq" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// Function annotations are used for datasource registration to the Provider. DO NOT EDIT. +// @SDKDataSource("aws_mq_engine_versions", name="Engine Versions") +func DataSourceEngineVersions() *schema.Resource { + return &schema.Resource{ + ReadWithoutTimeout: dataSourceEngineVersionsRead, + Schema: map[string]*schema.Schema{ + "filters": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "engine_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringInSlice([]string{"ACTIVEMQ", "RABBITMQ"}, false), + }, + }, + }, + }, + "broker_engine_types": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "engine_type": { + Type: schema.TypeString, + Computed: true, + }, + "engine_versions": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, + }, + }, + }, + }, + } +} + +const ( + DSNameEngineVersions = "Engine Versions Data Source" +) + +func dataSourceEngineVersionsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).MQConn(ctx) + + input := &mq.DescribeBrokerEngineTypesInput{} + if v, ok := d.GetOk("filters"); ok 
{ + filters := v.(*schema.Set).List() + for _, filter := range filters { + f := filter.(map[string]interface{}) + if v, ok := f["engine_type"]; ok { + input.EngineType = aws.String(v.(string)) + } + } + } + d.SetId(id.UniqueId()) + + var types []*mq.BrokerEngineType + for { + out, err := conn.DescribeBrokerEngineTypes(input) + if err != nil { + return append(diags, create.DiagError(names.MQ, create.ErrActionReading, DSNameEngineVersions, "", err)...) + } + + types = append(types, out.BrokerEngineTypes...) + if out.NextToken == nil { + break + } + input.NextToken = out.NextToken + } + if err := d.Set("broker_engine_types", flattenBrokerList(types)); err != nil { + return append(diags, create.DiagError(names.MQ, create.ErrActionSetting, DSNameEngineVersions, d.Id(), err)...) + } + + return diags +} + +func flattenBrokerList(types []*mq.BrokerEngineType) (brokers []map[string]interface{}) { + for _, broker := range types { + brokers = append(brokers, map[string]interface{}{ + "engine_type": broker.EngineType, + "engine_versions": flattenEngineVersions(broker.EngineVersions), + }) + } + return +} + +func flattenEngineVersions(engines []*mq.EngineVersion) (versions []map[string]string) { + for _, engine := range engines { + versions = append(versions, map[string]string{ + "name": aws.StringValue(engine.Name), + }) + } + return +} diff --git a/internal/service/mq/engine_versions_data_source_test.go b/internal/service/mq/engine_versions_data_source_test.go new file mode 100644 index 00000000000..73bdcd62a93 --- /dev/null +++ b/internal/service/mq/engine_versions_data_source_test.go @@ -0,0 +1,48 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package mq_test + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go/service/mq" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" +) + +func TestAccMQEngineVersionsDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + dataSourceName := "data.aws_mq_engine_versions.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, mq.EndpointsID) + testAccPreCheck(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, mq.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: acctest.CheckDestroyNoop, + Steps: []resource.TestStep{ + { + Config: testAccEngineVersionsDataSourceConfig_basic("ACTIVEMQ"), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr(dataSourceName, "broker_engine_types.#", "1"), + resource.TestCheckResourceAttr(dataSourceName, "broker_engine_types.0.engine_type", "ACTIVEMQ"), + ), + }, + }, + }) +} + +func testAccEngineVersionsDataSourceConfig_basic(engineType string) string { + return fmt.Sprintf(` +data "aws_mq_engine_versions" "test" { + filters { + engine_type = %[1]q + } +} +`, engineType) +} diff --git a/internal/service/mq/service_package_gen.go b/internal/service/mq/service_package_gen.go index 65642098572..c47197f82db 100644 --- a/internal/service/mq/service_package_gen.go +++ b/internal/service/mq/service_package_gen.go @@ -33,6 +33,11 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac Factory: DataSourceBrokerInstanceTypeOfferings, TypeName: "aws_mq_broker_instance_type_offerings", }, + { + Factory: DataSourceEngineVersions, + TypeName: "aws_mq_engine_versions", + Name: "Engine Versions", + }, } } diff --git a/website/docs/d/mq_engine_versions.html.markdown b/website/docs/d/mq_engine_versions.html.markdown new file mode 100644 index 
00000000000..4dea6b8723b --- /dev/null +++ b/website/docs/d/mq_engine_versions.html.markdown @@ -0,0 +1,44 @@ +--- +subcategory: "MQ" +layout: "aws" +page_title: "AWS: aws_mq_engine_versions" +description: |- + Terraform data source for retrieving available AWS MQ engine versions. +--- + +# Data Source: aws_mq_engine_versions + +Terraform data source for retrieving available AWS MQ engine versions. + +## Example Usage + +### Basic Usage + +```terraform +data "aws_mq_engine_versions" "example" { + filters { + engine_type = "ACTIVEMQ" + } +} +``` + +## Argument Reference + +* `filters` - Filters the results of the request. See [Filters](#filters). + +### filter + +The following filters are optional. + +* `engine_type` - (Optional) The database engine to return version details for. + +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `broker_engine_types` - A list of available engine types and versions. See [Engine Types](#engine-types). + +### engine-types + +* `engine_type` - The broker's engine type. +* `engine_versions` - The list of engine versions. 
From 8eb7bf5f6789decf7eee13c4f41c7e22ade03167 Mon Sep 17 00:00:00 2001 From: Daniel Quackenbush <25692880+danquack@users.noreply.github.com> Date: Thu, 2 Nov 2023 18:23:47 -0400 Subject: [PATCH 024/438] changelog --- .changelog/34232.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/34232.txt diff --git a/.changelog/34232.txt b/.changelog/34232.txt new file mode 100644 index 00000000000..0f3e7ff3eb8 --- /dev/null +++ b/.changelog/34232.txt @@ -0,0 +1,3 @@ +```release-note:new-data-source +aws_mq_engine_versions +``` From f72325007e61b998dfc218e3abb9d73078a37ecf Mon Sep 17 00:00:00 2001 From: Daniel Quackenbush <25692880+danquack@users.noreply.github.com> Date: Thu, 2 Nov 2023 18:28:16 -0400 Subject: [PATCH 025/438] add with context --- internal/service/mq/engine_versions_data_source.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/mq/engine_versions_data_source.go b/internal/service/mq/engine_versions_data_source.go index 0dfd17b7805..8515fc011fb 100644 --- a/internal/service/mq/engine_versions_data_source.go +++ b/internal/service/mq/engine_versions_data_source.go @@ -87,7 +87,7 @@ func dataSourceEngineVersionsRead(ctx context.Context, d *schema.ResourceData, m var types []*mq.BrokerEngineType for { - out, err := conn.DescribeBrokerEngineTypes(input) + out, err := conn.DescribeBrokerEngineTypesWithContext(ctx, input) if err != nil { return append(diags, create.DiagError(names.MQ, create.ErrActionReading, DSNameEngineVersions, "", err)...) 
} From 4010b11bd334b12a4098595a314c140aba764cd8 Mon Sep 17 00:00:00 2001 From: Daniel Quackenbush <25692880+danquack@users.noreply.github.com> Date: Fri, 3 Nov 2023 09:11:12 -0400 Subject: [PATCH 026/438] plural --- website/docs/d/mq_engine_versions.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/d/mq_engine_versions.html.markdown b/website/docs/d/mq_engine_versions.html.markdown index 4dea6b8723b..fbc9c4c1d22 100644 --- a/website/docs/d/mq_engine_versions.html.markdown +++ b/website/docs/d/mq_engine_versions.html.markdown @@ -26,7 +26,7 @@ data "aws_mq_engine_versions" "example" { * `filters` - Filters the results of the request. See [Filters](#filters). -### filter +### filters The following filters are optional. From 6a02fdd63a45420e2214db4f96ab6509ef00eae6 Mon Sep 17 00:00:00 2001 From: Daniel Quackenbush <25692880+danquack@users.noreply.github.com> Date: Fri, 3 Nov 2023 16:41:53 -0400 Subject: [PATCH 027/438] SDK V2 --- go.mod | 1 + go.sum | 2 ++ internal/conns/awsclient_gen.go | 5 +++++ .../service/mq/engine_versions_data_source.go | 22 ++++++++++--------- internal/service/mq/service_package_gen.go | 13 +++++++++++ names/names_data.csv | 2 +- 6 files changed, 34 insertions(+), 11 deletions(-) diff --git a/go.mod b/go.mod index 90071ce15dd..bf419177feb 100644 --- a/go.mod +++ b/go.mod @@ -45,6 +45,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.23.0 github.com/aws/aws-sdk-go-v2/service/medialive v1.40.0 github.com/aws/aws-sdk-go-v2/service/mediapackage v1.26.0 + github.com/aws/aws-sdk-go-v2/service/mq v1.19.0 github.com/aws/aws-sdk-go-v2/service/oam v1.6.0 github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.8.0 github.com/aws/aws-sdk-go-v2/service/pipes v1.6.0 diff --git a/go.sum b/go.sum index cd069fb6136..ce2f3eaace7 100644 --- a/go.sum +++ b/go.sum @@ -133,6 +133,8 @@ github.com/aws/aws-sdk-go-v2/service/medialive v1.40.0 h1:KXNXlxUVcsovV20L4AN0Sd 
github.com/aws/aws-sdk-go-v2/service/medialive v1.40.0/go.mod h1:eGKrvycmxzpiY8McGBQirqAu5aG/FUDlEnfcWdRByuE= github.com/aws/aws-sdk-go-v2/service/mediapackage v1.26.0 h1:P7LmCFgek9AFt0qVXdi+UmLQCNtzvIBjjt+BEeN7yzA= github.com/aws/aws-sdk-go-v2/service/mediapackage v1.26.0/go.mod h1:bVWnSoPOtMkeXpacWU4SrZkZVTBH8oXFuqhTfcQSBXQ= +github.com/aws/aws-sdk-go-v2/service/mq v1.19.0 h1:9gf5yIYxN+ASrWlgpLC5w2FgL+CTu7hxx4HWFS60NUE= +github.com/aws/aws-sdk-go-v2/service/mq v1.19.0/go.mod h1:taU41fCZSh94vkDwHlxMn+JeRPwu+lCzDLF1WCsTNUk= github.com/aws/aws-sdk-go-v2/service/oam v1.6.0 h1:XP1GGbtv6hIc+sW7Ox7FbWpnUu8r3kpvilzDX16M/IY= github.com/aws/aws-sdk-go-v2/service/oam v1.6.0/go.mod h1:vpZZihQNeatrVSplIVGqqALTmNqy9hAZztlrIJ1m/us= github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.8.0 h1:gXOVrOah9FrDG/DjTIqJYTudJSqlnDRlWB5TP4z9aXU= diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index 961e333f78c..4ca6c8eb73e 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -39,6 +39,7 @@ import ( mediaconnect_sdkv2 "github.com/aws/aws-sdk-go-v2/service/mediaconnect" medialive_sdkv2 "github.com/aws/aws-sdk-go-v2/service/medialive" mediapackage_sdkv2 "github.com/aws/aws-sdk-go-v2/service/mediapackage" + mq_sdkv2 "github.com/aws/aws-sdk-go-v2/service/mq" oam_sdkv2 "github.com/aws/aws-sdk-go-v2/service/oam" opensearchserverless_sdkv2 "github.com/aws/aws-sdk-go-v2/service/opensearchserverless" pipes_sdkv2 "github.com/aws/aws-sdk-go-v2/service/pipes" @@ -755,6 +756,10 @@ func (c *AWSClient) MQConn(ctx context.Context) *mq_sdkv1.MQ { return errs.Must(conn[*mq_sdkv1.MQ](ctx, c, names.MQ)) } +func (c *AWSClient) MQClient(ctx context.Context) *mq_sdkv2.Client { + return errs.Must(client[*mq_sdkv2.Client](ctx, c, names.MQ)) +} + func (c *AWSClient) MWAAConn(ctx context.Context) *mwaa_sdkv1.MWAA { return errs.Must(conn[*mwaa_sdkv1.MWAA](ctx, c, names.MWAA)) } diff --git a/internal/service/mq/engine_versions_data_source.go 
b/internal/service/mq/engine_versions_data_source.go index 8515fc011fb..3bcc4b54b76 100644 --- a/internal/service/mq/engine_versions_data_source.go +++ b/internal/service/mq/engine_versions_data_source.go @@ -6,8 +6,9 @@ package mq import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/mq" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/mq" + "github.com/aws/aws-sdk-go-v2/service/mq/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -71,7 +72,7 @@ const ( func dataSourceEngineVersionsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).MQConn(ctx) + client := meta.(*conns.AWSClient).MQClient(ctx) input := &mq.DescribeBrokerEngineTypesInput{} if v, ok := d.GetOk("filters"); ok { @@ -85,27 +86,28 @@ func dataSourceEngineVersionsRead(ctx context.Context, d *schema.ResourceData, m } d.SetId(id.UniqueId()) - var types []*mq.BrokerEngineType + var engineTypes []types.BrokerEngineType for { - out, err := conn.DescribeBrokerEngineTypesWithContext(ctx, input) + out, err := client.DescribeBrokerEngineTypes(ctx, input) if err != nil { return append(diags, create.DiagError(names.MQ, create.ErrActionReading, DSNameEngineVersions, "", err)...) } - types = append(types, out.BrokerEngineTypes...) + engineTypes = append(engineTypes, out.BrokerEngineTypes...) if out.NextToken == nil { break } input.NextToken = out.NextToken } - if err := d.Set("broker_engine_types", flattenBrokerList(types)); err != nil { + + if err := d.Set("broker_engine_types", flattenBrokerList(engineTypes)); err != nil { return append(diags, create.DiagError(names.MQ, create.ErrActionSetting, DSNameEngineVersions, d.Id(), err)...) 
} return diags } -func flattenBrokerList(types []*mq.BrokerEngineType) (brokers []map[string]interface{}) { +func flattenBrokerList(types []types.BrokerEngineType) (brokers []map[string]interface{}) { for _, broker := range types { brokers = append(brokers, map[string]interface{}{ "engine_type": broker.EngineType, @@ -115,10 +117,10 @@ func flattenBrokerList(types []*mq.BrokerEngineType) (brokers []map[string]inter return } -func flattenEngineVersions(engines []*mq.EngineVersion) (versions []map[string]string) { +func flattenEngineVersions(engines []types.EngineVersion) (versions []map[string]string) { for _, engine := range engines { versions = append(versions, map[string]string{ - "name": aws.StringValue(engine.Name), + "name": aws.ToString(engine.Name), }) } return diff --git a/internal/service/mq/service_package_gen.go b/internal/service/mq/service_package_gen.go index c47197f82db..d6ff59beda0 100644 --- a/internal/service/mq/service_package_gen.go +++ b/internal/service/mq/service_package_gen.go @@ -5,6 +5,8 @@ package mq import ( "context" + aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + mq_sdkv2 "github.com/aws/aws-sdk-go-v2/service/mq" aws_sdkv1 "github.com/aws/aws-sdk-go/aws" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" mq_sdkv1 "github.com/aws/aws-sdk-go/service/mq" @@ -73,6 +75,17 @@ func (p *servicePackage) NewConn(ctx context.Context, config map[string]any) (*m return mq_sdkv1.New(sess.Copy(&aws_sdkv1.Config{Endpoint: aws_sdkv1.String(config["endpoint"].(string))})), nil } +// NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. 
+func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*mq_sdkv2.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) + + return mq_sdkv2.NewFromConfig(cfg, func(o *mq_sdkv2.Options) { + if endpoint := config["endpoint"].(string); endpoint != "" { + o.BaseEndpoint = aws_sdkv2.String(endpoint) + } + }), nil +} + func ServicePackage(ctx context.Context) conns.ServicePackage { return &servicePackage{} } diff --git a/names/names_data.csv b/names/names_data.csv index 851099c2745..7e4e62adb2b 100644 --- a/names/names_data.csv +++ b/names/names_data.csv @@ -249,7 +249,7 @@ mobile,mobile,mobile,mobile,,mobile,,,Mobile,Mobile,,1,,,aws_mobile_,,mobile_,Mo ,,,,,,,,,,,,,,,,,Mobile SDK for Unity,AWS,x,,,,,No SDK support ,,,,,,,,,,,,,,,,,Mobile SDK for Xamarin,AWS,x,,,,,No SDK support ,,,,,,,,,,,,,,,,,Monitron,Amazon,x,,,,,No SDK support -mq,mq,mq,mq,,mq,,,MQ,MQ,,1,,,aws_mq_,,mq_,MQ,Amazon,,,,,, +mq,mq,mq,mq,,mq,,,MQ,MQ,,1,2,,aws_mq_,,mq_,MQ,Amazon,,,,,, mturk,mturk,mturk,mturk,,mturk,,,MTurk,MTurk,,1,,,aws_mturk_,,mturk_,MTurk (Mechanical Turk),Amazon,,x,,,, mwaa,mwaa,mwaa,mwaa,,mwaa,,,MWAA,MWAA,,1,,,aws_mwaa_,,mwaa_,MWAA (Managed Workflows for Apache Airflow),Amazon,,,,,, neptune,neptune,neptune,neptune,,neptune,,,Neptune,Neptune,,1,,,aws_neptune_,,neptune_,Neptune,Amazon,,,,,, From 6e96880094447be33a0f089f7f18d90007ba6974 Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Sat, 4 Nov 2023 13:24:24 +0100 Subject: [PATCH 028/438] feat: added policy doc on verified access endpoint --- internal/service/ec2/find.go | 24 ++++++ .../service/ec2/verifiedaccess_endpoint.go | 30 +++++++ .../ec2/verifiedaccess_endpoint_test.go | 84 +++++++++++++++++++ .../r/verifiedaccess_endpoint.html.markdown | 1 + 4 files changed, 139 insertions(+) diff --git a/internal/service/ec2/find.go b/internal/service/ec2/find.go index bf4d0381e98..e739a198ea7 100644 --- a/internal/service/ec2/find.go +++ b/internal/service/ec2/find.go @@ -7046,6 +7046,30 @@ func 
FindVerifiedAccessGroupPolicyByID(ctx context.Context, conn *ec2_sdkv2.Clie return output, nil } +func FindVerifiedAccessEndpointPolicyByID(ctx context.Context, conn *ec2_sdkv2.Client, id string) (*ec2_sdkv2.GetVerifiedAccessEndpointPolicyOutput, error) { + input := &ec2_sdkv2.GetVerifiedAccessEndpointPolicyInput{ + VerifiedAccessEndpointId: &id, + } + output, err := conn.GetVerifiedAccessEndpointPolicy(ctx, input) + + if tfawserr_sdkv2.ErrCodeEquals(err, errCodeInvalidVerifiedAccessEndpointIdNotFound) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + func FindVerifiedAccessGroup(ctx context.Context, conn *ec2_sdkv2.Client, input *ec2_sdkv2.DescribeVerifiedAccessGroupsInput) (*awstypes.VerifiedAccessGroup, error) { output, err := FindVerifiedAccessGroups(ctx, conn, input) diff --git a/internal/service/ec2/verifiedaccess_endpoint.go b/internal/service/ec2/verifiedaccess_endpoint.go index 731f7108325..6921e4351e0 100644 --- a/internal/service/ec2/verifiedaccess_endpoint.go +++ b/internal/service/ec2/verifiedaccess_endpoint.go @@ -138,6 +138,10 @@ func ResourceVerifiedAccessEndpoint() *schema.Resource { }, }, }, + "policy_document": { + Type: schema.TypeString, + Optional: true, + }, "security_group_ids": { Type: schema.TypeSet, Optional: true, @@ -206,6 +210,10 @@ func resourceVerifiedAccessEndpointCreate(ctx context.Context, d *schema.Resourc input.NetworkInterfaceOptions = expandCreateVerifiedAccessEndpointEniOptions(v.([]interface{})[0].(map[string]interface{})) } + if v, ok := d.GetOk("policy_document"); ok { + input.PolicyDocument = aws.String(v.(string)) + } + if v, ok := d.GetOk("security_group_ids"); ok && v.(*schema.Set).Len() > 0 { input.SecurityGroupIds = flex.ExpandStringValueSet(v.(*schema.Set)) } @@ -266,6 +274,14 @@ func resourceVerifiedAccessEndpointRead(ctx 
context.Context, d *schema.ResourceD d.Set("verified_access_group_id", ep.VerifiedAccessGroupId) d.Set("verified_access_instance_id", ep.VerifiedAccessInstanceId) + output, err := FindVerifiedAccessEndpointPolicyByID(ctx, conn, d.Id()) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading Verified Access Endpoint (%s) policy: %s", d.Id(), err) + } + + d.Set("policy_document", output.PolicyDocument) + return diags } @@ -309,6 +325,20 @@ func resourceVerifiedAccessEndpointUpdate(ctx context.Context, d *schema.Resourc } } + if d.HasChange("policy_document") { + input := &ec2.ModifyVerifiedAccessEndpointPolicyInput{ + PolicyDocument: aws.String(d.Get("policy_document").(string)), + VerifiedAccessEndpointId: aws.String(d.Id()), + PolicyEnabled: aws.Bool(true), + } + + _, err := conn.ModifyVerifiedAccessEndpointPolicy(ctx, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "updating Verified Access Endpoint (%s) policy: %s", d.Id(), err) + } + } + return append(diags, resourceVerifiedAccessEndpointRead(ctx, d, meta)...) 
} diff --git a/internal/service/ec2/verifiedaccess_endpoint_test.go b/internal/service/ec2/verifiedaccess_endpoint_test.go index 75c226f511c..8ffdd6e4e80 100644 --- a/internal/service/ec2/verifiedaccess_endpoint_test.go +++ b/internal/service/ec2/verifiedaccess_endpoint_test.go @@ -196,6 +196,49 @@ func TestAccVerifiedAccessEndpoint_disappears(t *testing.T) { }) } +func TestAccVerifiedAccessEndpoint_policyDocument(t *testing.T) { + ctx := acctest.Context(t) + var v types.VerifiedAccessEndpoint + resourceName := "aws_verifiedaccess_endpoint.test" + key := acctest.TLSRSAPrivateKeyPEM(t, 2048) + certificate := acctest.TLSRSAX509SelfSignedCertificatePEM(t, key, "example.com") + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + policyDoc := "permit(principal, action, resource) \nwhen {\ncontext.http_request.method == \"GET\"\n};" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + testAccPreCheckVerifiedAccess(ctx, t) + }, + ErrorCheck: acctest.ErrorCheck(t, names.EC2), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckVerifiedAccessEndpointDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccVerifiedAccessEndpointConfig_policyBase(rName, acctest.TLSPEMEscapeNewlines(key), acctest.TLSPEMEscapeNewlines(certificate)), + Check: resource.ComposeTestCheckFunc( + testAccCheckVerifiedAccessEndpointExists(ctx, resourceName, &v), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "endpoint_domain_prefix", + }, + }, + { + Config: testAccVerifiedAccessEndpointConfig_policyUpdate(rName, acctest.TLSPEMEscapeNewlines(key), acctest.TLSPEMEscapeNewlines(certificate), policyDoc), + Check: resource.ComposeTestCheckFunc( + testAccCheckVerifiedAccessEndpointExists(ctx, resourceName, &v), + resource.TestCheckResourceAttr(resourceName, "policy_document", policyDoc), + ), + }, + }, + }) +} + func 
testAccCheckVerifiedAccessEndpointDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).EC2Client(ctx) @@ -450,3 +493,44 @@ resource "aws_verifiedaccess_endpoint" "test" { `, rName, key, certificate, tagKey1, tagValue1, tagKey2, tagValue2)) } + +func testAccVerifiedAccessEndpointConfig_policyBase(rName, key, certificate string) string { + return acctest.ConfigCompose(testAccVerifiedAccessEndpointConfig_base(rName, key, certificate), ` +resource "aws_verifiedaccess_endpoint" "test" { + application_domain = "example.com" + attachment_type = "vpc" + description = "example" + domain_certificate_arn = aws_acm_certificate.test.arn + endpoint_domain_prefix = "example" + endpoint_type = "network-interface" + network_interface_options { + network_interface_id = aws_network_interface.test.id + port = 443 + protocol = "https" + } + security_group_ids = [aws_security_group.test.id] + verified_access_group_id = aws_verifiedaccess_group.test.id +} +`) +} + +func testAccVerifiedAccessEndpointConfig_policyUpdate(rName, key, certificate, policyDocument string) string { + return acctest.ConfigCompose(testAccVerifiedAccessEndpointConfig_base(rName, key, certificate), fmt.Sprintf(` +resource "aws_verifiedaccess_endpoint" "test" { + application_domain = "example.com" + attachment_type = "vpc" + description = "example" + domain_certificate_arn = aws_acm_certificate.test.arn + endpoint_domain_prefix = "example" + endpoint_type = "network-interface" + network_interface_options { + network_interface_id = aws_network_interface.test.id + port = 443 + protocol = "https" + } + policy_document = %[4]q + security_group_ids = [aws_security_group.test.id] + verified_access_group_id = aws_verifiedaccess_group.test.id +} +`, rName, key, certificate, policyDocument)) +} diff --git a/website/docs/r/verifiedaccess_endpoint.html.markdown b/website/docs/r/verifiedaccess_endpoint.html.markdown index 
eee034b4dcb..29bb5457ef7 100644 --- a/website/docs/r/verifiedaccess_endpoint.html.markdown +++ b/website/docs/r/verifiedaccess_endpoint.html.markdown @@ -70,6 +70,7 @@ The following arguments are optional: * `sse_specification` - (Optional) The options in use for server side encryption. * `load_balancer_options` - (Optional) The load balancer details. This parameter is required if the endpoint type is `load-balancer`. * `network_interface_options` - (Optional) The network interface details. This parameter is required if the endpoint type is `network-interface`. +* `policy_document` - (Optional) The policy document that is associated with this resource. * `security_group_ids` - (Optional) List of the the security groups IDs to associate with the Verified Access endpoint. * `tags` - (Optional) Key-value tags for the Verified Access Endpoint. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.
From 81ddc41614efc3699155f3dd56de05a7c9b4cebe Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Sat, 4 Nov 2023 13:55:03 +0100 Subject: [PATCH 029/438] feat: added changelog --- .changelog/34264.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/34264.txt diff --git a/.changelog/34264.txt b/.changelog/34264.txt new file mode 100644 index 00000000000..18b0b709d08 --- /dev/null +++ b/.changelog/34264.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_verifiedaccess_endpoint: Add `policy_document` argument +``` From e6ba495f23a0670efb730f34cfca8224ec6515e1 Mon Sep 17 00:00:00 2001 From: Drew Mullen Date: Mon, 30 Oct 2023 13:01:24 -0400 Subject: [PATCH 030/438] add updatePolicy parameter --- .changelog/CHANGEME.txt | 7 + internal/service/batch/compute_environment.go | 566 +++------------- .../batch/compute_environment_data_source.go | 8 +- .../compute_environment_data_source_test.go | 9 +- .../service/batch/compute_environment_test.go | 637 ++---------------- .../d/batch_compute_environment.html.markdown | 6 +- .../r/batch_compute_environment.html.markdown | 86 +-- 7 files changed, 208 insertions(+), 1111 deletions(-) create mode 100644 .changelog/CHANGEME.txt diff --git a/.changelog/CHANGEME.txt b/.changelog/CHANGEME.txt new file mode 100644 index 00000000000..521c87eaf7a --- /dev/null +++ b/.changelog/CHANGEME.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_batch_compute_environment: Add `update_policy` parameter +``` + +```release-note:enhancement +data-source/aws_batch_compute_environment: Add `update_policy` attribute +``` \ No newline at end of file diff --git a/internal/service/batch/compute_environment.go b/internal/service/batch/compute_environment.go index 0618b550c37..34c4a972716 100644 --- a/internal/service/batch/compute_environment.go +++ b/internal/service/batch/compute_environment.go @@ -1,22 +1,15 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package batch import ( "context" - "errors" "fmt" "log" "strings" - "time" - "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/batch" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -26,11 +19,8 @@ import ( tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" - "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_batch_compute_environment", name="Compute Environment") -// @Tags(identifierAttribute="arn") func ResourceComputeEnvironment() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: resourceComputeEnvironmentCreate, @@ -79,6 +69,7 @@ func ResourceComputeEnvironment() *schema.Resource { "allocation_strategy": { Type: schema.TypeString, Optional: true, + ForceNew: true, StateFunc: func(val interface{}) string { return strings.ToUpper(val.(string)) }, @@ -87,6 +78,7 @@ func ResourceComputeEnvironment() *schema.Resource { "bid_percentage": { Type: schema.TypeInt, Optional: true, + ForceNew: true, }, "desired_vcpus": { Type: schema.TypeInt, @@ -98,18 +90,20 @@ func ResourceComputeEnvironment() *schema.Resource { Optional: true, Computed: true, ForceNew: true, - MaxItems: 2, + MaxItems: 1, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "image_id_override": { Type: schema.TypeString, Optional: true, Computed: true, + ForceNew: true, ValidateFunc: validation.StringLenBetween(1, 256), }, "image_type": { Type: schema.TypeString, Optional: true, + ForceNew: true, ValidateFunc: 
validation.StringLenBetween(1, 256), }, }, @@ -118,19 +112,23 @@ func ResourceComputeEnvironment() *schema.Resource { "ec2_key_pair": { Type: schema.TypeString, Optional: true, + ForceNew: true, }, "image_id": { Type: schema.TypeString, Optional: true, + ForceNew: true, }, "instance_role": { Type: schema.TypeString, Optional: true, + ForceNew: true, ValidateFunc: verify.ValidARN, }, "instance_type": { Type: schema.TypeSet, Optional: true, + ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, }, "launch_template": { @@ -143,17 +141,19 @@ func ResourceComputeEnvironment() *schema.Resource { "launch_template_id": { Type: schema.TypeString, Optional: true, + ForceNew: true, ConflictsWith: []string{"compute_resources.0.launch_template.0.launch_template_name"}, }, "launch_template_name": { Type: schema.TypeString, Optional: true, + ForceNew: true, ConflictsWith: []string{"compute_resources.0.launch_template.0.launch_template_id"}, }, "version": { Type: schema.TypeString, Optional: true, - Computed: true, + ForceNew: true, }, }, }, @@ -166,11 +166,6 @@ func ResourceComputeEnvironment() *schema.Resource { Type: schema.TypeInt, Optional: true, }, - "placement_group": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - }, "security_group_ids": { Type: schema.TypeSet, Optional: true, @@ -187,10 +182,11 @@ func ResourceComputeEnvironment() *schema.Resource { Required: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - "tags": tftags.TagsSchema(), + "tags": tftags.TagsSchemaForceNew(), "type": { Type: schema.TypeString, Required: true, + ForceNew: true, StateFunc: func(val interface{}) string { return strings.ToUpper(val.(string)) }, @@ -248,8 +244,8 @@ func ResourceComputeEnvironment() *schema.Resource { Type: schema.TypeString, Computed: true, }, - names.AttrTags: tftags.TagsSchema(), - names.AttrTagsAll: tftags.TagsSchemaComputed(), + "tags": tftags.TagsSchema(), + "tags_all": tftags.TagsSchemaComputed(), "type": { Type: schema.TypeString, 
Required: true, @@ -265,19 +261,21 @@ func ResourceComputeEnvironment() *schema.Resource { func resourceComputeEnvironmentCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BatchConn(ctx) + conn := meta.(*conns.AWSClient).BatchConn() + defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig + tags := defaultTagsConfig.MergeTags(tftags.New(d.Get("tags").(map[string]interface{}))) computeEnvironmentName := create.Name(d.Get("compute_environment_name").(string), d.Get("compute_environment_name_prefix").(string)) computeEnvironmentType := d.Get("type").(string) + input := &batch.CreateComputeEnvironmentInput{ ComputeEnvironmentName: aws.String(computeEnvironmentName), ServiceRole: aws.String(d.Get("service_role").(string)), - Tags: getTagsIn(ctx), Type: aws.String(computeEnvironmentType), } if v, ok := d.GetOk("compute_resources"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - input.ComputeResources = expandComputeResource(ctx, v.([]interface{})[0].(map[string]interface{})) + input.ComputeResources = expandComputeResource(v.([]interface{})[0].(map[string]interface{})) } if v, ok := d.GetOk("eks_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { @@ -288,6 +286,11 @@ func resourceComputeEnvironmentCreate(ctx context.Context, d *schema.ResourceDat input.State = aws.String(v.(string)) } + if len(tags) > 0 { + input.Tags = Tags(tags.IgnoreAWS()) + } + + log.Printf("[DEBUG] Creating Batch Compute Environment: %s", input) output, err := conn.CreateComputeEnvironmentWithContext(ctx, input) if err != nil { @@ -305,9 +308,11 @@ func resourceComputeEnvironmentCreate(ctx context.Context, d *schema.ResourceDat func resourceComputeEnvironmentRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BatchConn(ctx) + conn := 
meta.(*conns.AWSClient).BatchConn() + defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig + ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig - computeEnvironment, err := findComputeEnvironmentDetailByName(ctx, conn, d.Id()) + computeEnvironment, err := FindComputeEnvironmentDetailByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] Batch Compute Environment (%s) not found, removing from state", d.Id()) @@ -324,14 +329,21 @@ func resourceComputeEnvironmentRead(ctx context.Context, d *schema.ResourceData, d.Set("arn", computeEnvironment.ComputeEnvironmentArn) d.Set("compute_environment_name", computeEnvironment.ComputeEnvironmentName) d.Set("compute_environment_name_prefix", create.NamePrefixFromName(aws.StringValue(computeEnvironment.ComputeEnvironmentName))) + d.Set("ecs_cluster_arn", computeEnvironment.EcsClusterArn) + d.Set("service_role", computeEnvironment.ServiceRole) + d.Set("state", computeEnvironment.State) + d.Set("status", computeEnvironment.Status) + d.Set("status_reason", computeEnvironment.StatusReason) + d.Set("type", computeEnvironmentType) + if computeEnvironment.ComputeResources != nil { - if err := d.Set("compute_resources", []interface{}{flattenComputeResource(ctx, computeEnvironment.ComputeResources)}); err != nil { + if err := d.Set("compute_resources", []interface{}{flattenComputeResource(computeEnvironment.ComputeResources)}); err != nil { return sdkdiag.AppendErrorf(diags, "setting compute_resources: %s", err) } } else { d.Set("compute_resources", nil) } - d.Set("ecs_cluster_arn", computeEnvironment.EcsClusterArn) + if computeEnvironment.EksConfiguration != nil { if err := d.Set("eks_configuration", []interface{}{flattenEKSConfiguration(computeEnvironment.EksConfiguration)}); err != nil { return sdkdiag.AppendErrorf(diags, "setting eks_configuration: %s", err) @@ -339,20 +351,24 @@ func resourceComputeEnvironmentRead(ctx context.Context, d *schema.ResourceData, } else { 
d.Set("eks_configuration", nil) } - d.Set("service_role", computeEnvironment.ServiceRole) - d.Set("state", computeEnvironment.State) - d.Set("status", computeEnvironment.Status) - d.Set("status_reason", computeEnvironment.StatusReason) - d.Set("type", computeEnvironmentType) - setTagsOut(ctx, computeEnvironment.Tags) + tags := KeyValueTags(computeEnvironment.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig) + + //lintignore:AWSR002 + if err := d.Set("tags", tags.RemoveDefaultConfig(defaultTagsConfig).Map()); err != nil { + return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) + } + + if err := d.Set("tags_all", tags.Map()); err != nil { + return sdkdiag.AppendErrorf(diags, "setting tags_all: %s", err) + } return diags } func resourceComputeEnvironmentUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BatchConn(ctx) + conn := meta.(*conns.AWSClient).BatchConn() if d.HasChangesExcept("tags", "tags_all") { input := &batch.UpdateComputeEnvironmentInput{ @@ -373,102 +389,20 @@ func resourceComputeEnvironmentUpdate(ctx context.Context, d *schema.ResourceDat MaxvCpus: aws.Int64(int64(d.Get("compute_resources.0.max_vcpus").(int))), } - if d.HasChange("compute_resources.0.security_group_ids") { - computeResourceUpdate.SecurityGroupIds = flex.ExpandStringSet(d.Get("compute_resources.0.security_group_ids").(*schema.Set)) - } - - if d.HasChange("compute_resources.0.subnets") { - computeResourceUpdate.Subnets = flex.ExpandStringSet(d.Get("compute_resources.0.subnets").(*schema.Set)) + if d.HasChange("compute_resources.0.desired_vcpus") { + computeResourceUpdate.DesiredvCpus = aws.Int64(int64(d.Get("compute_resources.0.desired_vcpus").(int))) } - if d.HasChange("compute_resources.0.allocation_strategy") { - if allocationStrategy, ok := d.GetOk("compute_resources.0.allocation_strategy"); ok { - computeResourceUpdate.AllocationStrategy = aws.String(allocationStrategy.(string)) - 
} else { - computeResourceUpdate.AllocationStrategy = aws.String("") - } + if d.HasChange("compute_resources.0.min_vcpus") { + computeResourceUpdate.MinvCpus = aws.Int64(int64(d.Get("compute_resources.0.min_vcpus").(int))) } - computeResourceEnvironmentType := d.Get("compute_resources.0.type").(string) - - if d.HasChange("compute_resources.0.type") { - computeResourceUpdate.Type = aws.String(computeResourceEnvironmentType) + if d.HasChange("compute_resources.0.security_group_ids") { + computeResourceUpdate.SecurityGroupIds = flex.ExpandStringSet(d.Get("compute_resources.0.security_group_ids").(*schema.Set)) } - if !isFargateType(computeResourceEnvironmentType) { - if d.HasChange("compute_resources.0.desired_vcpus") { - if desiredvCpus, ok := d.GetOk("compute_resources.0.desired_vcpus"); ok { - computeResourceUpdate.DesiredvCpus = aws.Int64(int64(desiredvCpus.(int))) - } else { - computeResourceUpdate.DesiredvCpus = aws.Int64(0) - } - } - - if d.HasChange("compute_resources.0.min_vcpus") { - if minVcpus, ok := d.GetOk("compute_resources.0.min_vcpus"); ok { - computeResourceUpdate.MinvCpus = aws.Int64(int64(minVcpus.(int))) - } else { - computeResourceUpdate.MinvCpus = aws.Int64(0) - } - } - - if d.HasChange("compute_resources.0.bid_percentage") { - if bidPercentage, ok := d.GetOk("compute_resources.0.bid_percentage"); ok { - computeResourceUpdate.BidPercentage = aws.Int64(int64(bidPercentage.(int))) - } else { - computeResourceUpdate.BidPercentage = aws.Int64(0) - } - } - - if d.HasChange("compute_resources.0.ec2_configuration") { - defaultImageType := "ECS_AL2" - if _, ok := d.GetOk("eks_configuration.#"); ok { - defaultImageType = "EKS_AL2" - } - ec2Configuration := d.Get("compute_resources.0.ec2_configuration").([]interface{}) - computeResourceUpdate.Ec2Configuration = expandEC2ConfigurationsUpdate(ec2Configuration, defaultImageType) - } - - if d.HasChange("compute_resources.0.ec2_key_pair") { - if keyPair, ok := d.GetOk("compute_resources.0.ec2_key_pair"); ok { 
- computeResourceUpdate.Ec2KeyPair = aws.String(keyPair.(string)) - } else { - computeResourceUpdate.Ec2KeyPair = aws.String("") - } - } - - if d.HasChange("compute_resources.0.image_id") { - if imageId, ok := d.GetOk("compute_resources.0.image_id"); ok { - computeResourceUpdate.ImageId = aws.String(imageId.(string)) - } else { - computeResourceUpdate.ImageId = aws.String("") - } - } - - if d.HasChange("compute_resources.0.instance_role") { - if instanceRole, ok := d.GetOk("compute_resources.0.instance_role"); ok { - computeResourceUpdate.InstanceRole = aws.String(instanceRole.(string)) - } else { - computeResourceUpdate.InstanceRole = aws.String("") - } - } - - if d.HasChange("compute_resources.0.instance_type") { - computeResourceUpdate.InstanceTypes = flex.ExpandStringSet(d.Get("compute_resources.0.instance_type").(*schema.Set)) - } - - if d.HasChange("compute_resources.0.launch_template") { - launchTemplate := d.Get("compute_resources.0.launch_template").([]interface{}) - computeResourceUpdate.LaunchTemplate = expandLaunchTemplateSpecificationUpdate(launchTemplate) - } - - if d.HasChange("compute_resources.0.tags") { - if tags, ok := d.GetOk("compute_resources.0.tags"); ok { - computeResourceUpdate.Tags = Tags(tftags.New(ctx, tags.(map[string]interface{})).IgnoreAWS()) - } else { - computeResourceUpdate.Tags = aws.StringMap(map[string]string{}) - } - } + if d.HasChange("compute_resources.0.subnets") { + computeResourceUpdate.Subnets = flex.ExpandStringSet(d.Get("compute_resources.0.subnets").(*schema.Set)) } input.ComputeResources = computeResourceUpdate @@ -484,12 +418,20 @@ func resourceComputeEnvironmentUpdate(ctx context.Context, d *schema.ResourceDat } } + if d.HasChange("tags_all") { + o, n := d.GetChange("tags_all") + + if err := UpdateTags(ctx, conn, d.Get("arn").(string), o, n); err != nil { + return sdkdiag.AppendErrorf(diags, "updating tags: %s", err) + } + } + return append(diags, resourceComputeEnvironmentRead(ctx, d, meta)...) 
} func resourceComputeEnvironmentDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BatchConn(ctx) + conn := meta.(*conns.AWSClient).BatchConn() log.Printf("[DEBUG] Disabling Batch Compute Environment: %s", d.Id()) { @@ -536,303 +478,29 @@ func resourceComputeEnvironmentCustomizeDiff(_ context.Context, diff *schema.Res if diff.Id() != "" { // Update. - fargateComputeResources := isFargateType(diff.Get("compute_resources.0.type").(string)) - - if !isUpdatableComputeEnvironment(diff) { - if diff.HasChange("compute_resources.0.security_group_ids") && !fargateComputeResources { - if err := diff.ForceNew("compute_resources.0.security_group_ids"); err != nil { - return err - } - } - - if diff.HasChange("compute_resources.0.subnets") && !fargateComputeResources { - if err := diff.ForceNew("compute_resources.0.subnets"); err != nil { - return err - } - } - - if diff.HasChange("compute_resources.0.allocation_strategy") { - if err := diff.ForceNew("compute_resources.0.allocation_strategy"); err != nil { - return err - } - } - - if diff.HasChange("compute_resources.0.bid_percentage") { - if err := diff.ForceNew("compute_resources.0.bid_percentage"); err != nil { - return err - } - } - - if diff.HasChange("compute_resources.0.ec2_configuration.#") { - if err := diff.ForceNew("compute_resources.0.ec2_configuration.#"); err != nil { - return err - } - } - - if diff.HasChange("compute_resources.0.ec2_configuration.0.image_id_override") { - if err := diff.ForceNew("compute_resources.0.ec2_configuration.0.image_id_override"); err != nil { - return err - } - } - - if diff.HasChange("compute_resources.0.ec2_configuration.0.image_type") { - if err := diff.ForceNew("compute_resources.0.ec2_configuration.0.image_type"); err != nil { - return err - } - } - - if diff.HasChange("compute_resources.0.ec2_key_pair") { - if err := diff.ForceNew("compute_resources.0.ec2_key_pair"); err != nil { - 
return err - } - } - - if diff.HasChange("compute_resources.0.image_id") { - if err := diff.ForceNew("compute_resources.0.image_id"); err != nil { - return err - } - } - - if diff.HasChange("compute_resources.0.instance_role") { - if err := diff.ForceNew("compute_resources.0.instance_role"); err != nil { - return err - } - } - - if diff.HasChange("compute_resources.0.instance_type") { - if err := diff.ForceNew("compute_resources.0.instance_type"); err != nil { - return err - } - } - - if diff.HasChange("compute_resources.0.launch_template.#") { - if err := diff.ForceNew("compute_resources.0.launch_template.#"); err != nil { - return err - } - } - - if diff.HasChange("compute_resources.0.launch_template.0.launch_template_id") { - if err := diff.ForceNew("compute_resources.0.launch_template.0.launch_template_id"); err != nil { - return err - } - } - - if diff.HasChange("compute_resources.0.launch_template.0.launch_template_name") { - if err := diff.ForceNew("compute_resources.0.launch_template.0.launch_template_name"); err != nil { - return err - } - } - - if diff.HasChange("compute_resources.0.launch_template.0.version") { - if err := diff.ForceNew("compute_resources.0.launch_template.0.version"); err != nil { - return err - } - } - - if diff.HasChange("compute_resources.0.tags") { - if err := diff.ForceNew("compute_resources.0.tags"); err != nil { - return err - } - } - } - } - - return nil -} - -func findComputeEnvironmentDetailByName(ctx context.Context, conn *batch.Batch, name string) (*batch.ComputeEnvironmentDetail, error) { - input := &batch.DescribeComputeEnvironmentsInput{ - ComputeEnvironments: aws.StringSlice([]string{name}), - } - - output, err := findComputeEnvironmentDetail(ctx, conn, input) - - if err != nil { - return nil, err - } - - if status := aws.StringValue(output.Status); status == batch.CEStatusDeleted { - return nil, &retry.NotFoundError{ - Message: status, - LastRequest: input, + computeResourceType := 
strings.ToUpper(diff.Get("compute_resources.0.type").(string)) + fargateComputeResources := false + if computeResourceType == batch.CRTypeFargate || computeResourceType == batch.CRTypeFargateSpot { + fargateComputeResources = true } - } - - return output, nil -} - -func findComputeEnvironmentDetail(ctx context.Context, conn *batch.Batch, input *batch.DescribeComputeEnvironmentsInput) (*batch.ComputeEnvironmentDetail, error) { - output, err := conn.DescribeComputeEnvironmentsWithContext(ctx, input) - - if err != nil { - return nil, err - } - - if output == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return tfresource.AssertSinglePtrResult(output.ComputeEnvironments) -} - -func statusComputeEnvironment(ctx context.Context, conn *batch.Batch, name string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - computeEnvironmentDetail, err := findComputeEnvironmentDetailByName(ctx, conn, name) - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return computeEnvironmentDetail, aws.StringValue(computeEnvironmentDetail.Status), nil - } -} - -func waitComputeEnvironmentCreated(ctx context.Context, conn *batch.Batch, name string, timeout time.Duration) (*batch.ComputeEnvironmentDetail, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{batch.CEStatusCreating}, - Target: []string{batch.CEStatusValid}, - Refresh: statusComputeEnvironment(ctx, conn, name), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*batch.ComputeEnvironmentDetail); ok { - if status := aws.StringValue(output.Status); status == batch.CEStatusInvalid { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.StatusReason))) - } - - return output, err - } - - return nil, err -} - -func waitComputeEnvironmentDeleted(ctx context.Context, conn *batch.Batch, name string, timeout time.Duration) (*batch.ComputeEnvironmentDetail, 
error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{batch.CEStatusDeleting}, - Target: []string{}, - Refresh: statusComputeEnvironment(ctx, conn, name), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*batch.ComputeEnvironmentDetail); ok { - if status := aws.StringValue(output.Status); status == batch.CEStatusInvalid { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.StatusReason))) - } - - return output, err - } - - return nil, err -} - -func waitComputeEnvironmentDisabled(ctx context.Context, conn *batch.Batch, name string, timeout time.Duration) (*batch.ComputeEnvironmentDetail, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{batch.CEStatusUpdating}, - Target: []string{batch.CEStatusValid}, - Refresh: statusComputeEnvironment(ctx, conn, name), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*batch.ComputeEnvironmentDetail); ok { - if status := aws.StringValue(output.Status); status == batch.CEStatusInvalid { - tfresource.SetLastError(err, errors.New(aws.StringValue(output.StatusReason))) + if diff.HasChange("compute_resources.0.security_group_ids") && !fargateComputeResources { + if err := diff.ForceNew("compute_resources.0.security_group_ids"); err != nil { + return err + } } - return output, err - } - - return nil, err -} - -func waitComputeEnvironmentUpdated(ctx context.Context, conn *batch.Batch, name string, timeout time.Duration) (*batch.ComputeEnvironmentDetail, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{batch.CEStatusUpdating}, - Target: []string{batch.CEStatusValid}, - Refresh: statusComputeEnvironment(ctx, conn, name), - Timeout: timeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if v, ok := outputRaw.(*batch.ComputeEnvironmentDetail); ok { - return v, err - } - - return nil, err -} - -func isFargateType(computeResourceType 
string) bool { - if computeResourceType == batch.CRTypeFargate || computeResourceType == batch.CRTypeFargateSpot { - return true - } - return false -} - -func isUpdatableComputeEnvironment(diff *schema.ResourceDiff) bool { - if !isServiceLinkedRoleDiff(diff) { - return false - } - if !isUpdatableAllocationStrategyDiff(diff) { - return false - } - return true -} - -func isServiceLinkedRoleDiff(diff *schema.ResourceDiff) bool { - var before, after string - if diff.HasChange("service_role") { - beforeRaw, afterRaw := diff.GetChange("service_role") - before, _ = beforeRaw.(string) - after, _ := afterRaw.(string) - return isServiceLinkedRole(before) && isServiceLinkedRole(after) - } - afterRaw, _ := diff.GetOk("service_role") - after, _ = afterRaw.(string) - return isServiceLinkedRole(after) -} - -func isServiceLinkedRole(roleArn string) bool { - if roleArn == "" { - // Empty role ARN defaults to AWS service-linked role - return true - } - re := regexache.MustCompile(`arn:[^:]+:iam::\d{12}:role/aws-service-role/batch\.amazonaws\.com/*`) - return re.MatchString(roleArn) -} - -func isUpdatableAllocationStrategyDiff(diff *schema.ResourceDiff) bool { - var before, after string - if computeResourcesCount, ok := diff.Get("compute_resources.#").(int); ok { - if computeResourcesCount > 0 { - if diff.HasChange("compute_resources.0.allocation_strategy") { - beforeRaw, afterRaw := diff.GetChange("compute_resources.0.allocation_strategy") - before, _ = beforeRaw.(string) - after, _ = afterRaw.(string) - return isUpdatableAllocationStrategy(before) && isUpdatableAllocationStrategy(after) + if diff.HasChange("compute_resources.0.subnets") && !fargateComputeResources { + if err := diff.ForceNew("compute_resources.0.subnets"); err != nil { + return err } - afterRaw, _ := diff.GetOk("compute_resources.0.allocation_strategy") - after, _ := afterRaw.(string) - return isUpdatableAllocationStrategy(after) } } - return false -} -func isUpdatableAllocationStrategy(allocationStrategy string) 
bool { - return allocationStrategy == batch.CRAllocationStrategyBestFitProgressive || allocationStrategy == batch.CRAllocationStrategySpotCapacityOptimized + return nil } -func expandComputeResource(ctx context.Context, tfMap map[string]interface{}) *batch.ComputeResource { +func expandComputeResource(tfMap map[string]interface{}) *batch.ComputeResource { if tfMap == nil { return nil } @@ -877,7 +545,7 @@ func expandComputeResource(ctx context.Context, tfMap map[string]interface{}) *b apiObject.InstanceTypes = flex.ExpandStringSet(v) } - if v, ok := tfMap["launch_template"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + if v, ok := tfMap["launch_template"].([]interface{}); ok && len(v) > 0 { apiObject.LaunchTemplate = expandLaunchTemplateSpecification(v[0].(map[string]interface{})) } @@ -891,10 +559,6 @@ func expandComputeResource(ctx context.Context, tfMap map[string]interface{}) *b apiObject.MinvCpus = aws.Int64(0) } - if v, ok := tfMap["placement_group"].(string); ok && v != "" { - apiObject.PlacementGroup = aws.String(v) - } - if v, ok := tfMap["security_group_ids"].(*schema.Set); ok && v.Len() > 0 { apiObject.SecurityGroupIds = flex.ExpandStringSet(v) } @@ -908,7 +572,7 @@ func expandComputeResource(ctx context.Context, tfMap map[string]interface{}) *b } if v, ok := tfMap["tags"].(map[string]interface{}); ok && len(v) > 0 { - apiObject.Tags = Tags(tftags.New(ctx, v).IgnoreAWS()) + apiObject.Tags = Tags(tftags.New(v).IgnoreAWS()) } if computeResourceType != "" { @@ -1002,65 +666,7 @@ func expandLaunchTemplateSpecification(tfMap map[string]interface{}) *batch.Laun return apiObject } -func expandEC2ConfigurationsUpdate(tfList []interface{}, defaultImageType string) []*batch.Ec2Configuration { - if len(tfList) == 0 { - return []*batch.Ec2Configuration{ - { - ImageType: aws.String(defaultImageType), - }, - } - } - - var apiObjects []*batch.Ec2Configuration - - for _, tfMapRaw := range tfList { - tfMap, ok := tfMapRaw.(map[string]interface{}) - - if !ok { - 
continue - } - - apiObject := expandEC2Configuration(tfMap) - - if apiObject == nil { - continue - } - - apiObjects = append(apiObjects, apiObject) - } - - return apiObjects -} - -func expandLaunchTemplateSpecificationUpdate(tfList []interface{}) *batch.LaunchTemplateSpecification { - if len(tfList) == 0 || tfList[0] == nil { - // delete any existing launch template configuration - return &batch.LaunchTemplateSpecification{ - LaunchTemplateId: aws.String(""), - } - } - - tfMap := tfList[0].(map[string]interface{}) - apiObject := &batch.LaunchTemplateSpecification{} - - if v, ok := tfMap["launch_template_id"].(string); ok && v != "" { - apiObject.LaunchTemplateId = aws.String(v) - } - - if v, ok := tfMap["launch_template_name"].(string); ok && v != "" { - apiObject.LaunchTemplateName = aws.String(v) - } - - if v, ok := tfMap["version"].(string); ok { - apiObject.Version = aws.String(v) - } else { - apiObject.Version = aws.String("") - } - - return apiObject -} - -func flattenComputeResource(ctx context.Context, apiObject *batch.ComputeResource) map[string]interface{} { +func flattenComputeResource(apiObject *batch.ComputeResource) map[string]interface{} { if apiObject == nil { return nil } @@ -1111,10 +717,6 @@ func flattenComputeResource(ctx context.Context, apiObject *batch.ComputeResourc tfMap["min_vcpus"] = aws.Int64Value(v) } - if v := apiObject.PlacementGroup; v != nil { - tfMap["placement_group"] = aws.StringValue(v) - } - if v := apiObject.SecurityGroupIds; v != nil { tfMap["security_group_ids"] = aws.StringValueSlice(v) } @@ -1128,7 +730,7 @@ func flattenComputeResource(ctx context.Context, apiObject *batch.ComputeResourc } if v := apiObject.Tags; v != nil { - tfMap["tags"] = KeyValueTags(ctx, v).IgnoreAWS().Map() + tfMap["tags"] = KeyValueTags(v).IgnoreAWS().Map() } if v := apiObject.Type; v != nil { diff --git a/internal/service/batch/compute_environment_data_source.go b/internal/service/batch/compute_environment_data_source.go index 
7163667f07c..3c4fc60db39 100644 --- a/internal/service/batch/compute_environment_data_source.go +++ b/internal/service/batch/compute_environment_data_source.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - package batch import ( @@ -15,7 +12,6 @@ import ( tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" ) -// @SDKDataSource("aws_batch_compute_environment") func DataSourceComputeEnvironment() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceComputeEnvironmentRead, @@ -68,7 +64,7 @@ func DataSourceComputeEnvironment() *schema.Resource { func dataSourceComputeEnvironmentRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BatchConn(ctx) + conn := meta.(*conns.AWSClient).BatchConn() ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig params := &batch.DescribeComputeEnvironmentsInput{ @@ -97,7 +93,7 @@ func dataSourceComputeEnvironmentRead(ctx context.Context, d *schema.ResourceDat d.Set("status_reason", computeEnvironment.StatusReason) d.Set("state", computeEnvironment.State) - if err := d.Set("tags", KeyValueTags(ctx, computeEnvironment.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + if err := d.Set("tags", KeyValueTags(computeEnvironment.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) } diff --git a/internal/service/batch/compute_environment_data_source_test.go b/internal/service/batch/compute_environment_data_source_test.go index 6f35d54e2d5..7e95ac96b16 100644 --- a/internal/service/batch/compute_environment_data_source_test.go +++ b/internal/service/batch/compute_environment_data_source_test.go @@ -1,6 +1,3 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package batch_test import ( @@ -8,8 +5,8 @@ import ( "testing" "github.com/aws/aws-sdk-go/service/batch" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" - "github.com/hashicorp/terraform-plugin-testing/helper/resource" + sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" ) @@ -20,7 +17,7 @@ func TestAccBatchComputeEnvironmentDataSource_basic(t *testing.T) { datasourceName := "data.aws_batch_compute_environment.by_name" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ diff --git a/internal/service/batch/compute_environment_test.go b/internal/service/batch/compute_environment_test.go index 8b520e66486..b326d9d41e4 100644 --- a/internal/service/batch/compute_environment_test.go +++ b/internal/service/batch/compute_environment_test.go @@ -1,159 +1,21 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - package batch_test import ( "context" "fmt" + "regexp" "testing" - "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/batch" - "github.com/google/go-cmp/cmp" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" + sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfbatch "github.com/hashicorp/terraform-provider-aws/internal/service/batch" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) -func TestExpandEC2ConfigurationsUpdate(t *testing.T) { - t.Parallel() - - //lintignore:AWSAT002 - testCases := []struct { - flattened []interface{} - expected []*batch.Ec2Configuration - }{ - { - flattened: []interface{}{}, - expected: []*batch.Ec2Configuration{ - { - ImageType: aws.String("default"), - }, - }, - }, - { - flattened: []interface{}{ - map[string]interface{}{ - "image_type": "ECS_AL1", - }, - }, - expected: []*batch.Ec2Configuration{ - { - ImageType: aws.String("ECS_AL1"), - }, - }, - }, - { - flattened: []interface{}{ - map[string]interface{}{ - "image_id_override": "ami-deadbeef", - }, - }, - expected: []*batch.Ec2Configuration{ - { - ImageIdOverride: aws.String("ami-deadbeef"), - }, - }, - }, - { - flattened: []interface{}{ - map[string]interface{}{ - "image_id_override": "ami-deadbeef", - "image_type": "ECS_AL1", - }, - }, - expected: []*batch.Ec2Configuration{ - { - ImageIdOverride: aws.String("ami-deadbeef"), - ImageType: aws.String("ECS_AL1"), - }, - }, - }, - } - - for _, testCase := range testCases { - expanded := 
tfbatch.ExpandEC2ConfigurationsUpdate(testCase.flattened, "default") - if diff := cmp.Diff(expanded, testCase.expected); diff != "" { - t.Errorf("unexpected diff (+wanted, -got): %s", diff) - } - } -} - -func TestExpandLaunchTemplateSpecificationUpdate(t *testing.T) { - t.Parallel() - - testCases := []struct { - flattened []interface{} - expected *batch.LaunchTemplateSpecification - }{ - { - flattened: []interface{}{}, - expected: &batch.LaunchTemplateSpecification{ - LaunchTemplateId: aws.String(""), - }, - }, - { - flattened: []interface{}{ - map[string]interface{}{ - "launch_template_id": "lt-123456", - }, - }, - expected: &batch.LaunchTemplateSpecification{ - LaunchTemplateId: aws.String("lt-123456"), - Version: aws.String(""), - }, - }, - { - flattened: []interface{}{ - map[string]interface{}{ - "launch_template_name": "my-launch-template", - }, - }, - expected: &batch.LaunchTemplateSpecification{ - LaunchTemplateName: aws.String("my-launch-template"), - Version: aws.String(""), - }, - }, - { - flattened: []interface{}{ - map[string]interface{}{ - "launch_template_id": "lt-123456", - "version": "$LATEST", - }, - }, - expected: &batch.LaunchTemplateSpecification{ - LaunchTemplateId: aws.String("lt-123456"), - Version: aws.String("$LATEST"), - }, - }, - { - flattened: []interface{}{ - map[string]interface{}{ - "launch_template_name": "my-launch-template", - "version": "$LATEST", - }, - }, - expected: &batch.LaunchTemplateSpecification{ - LaunchTemplateName: aws.String("my-launch-template"), - Version: aws.String("$LATEST"), - }, - }, - } - - for _, testCase := range testCases { - expanded := tfbatch.ExpandLaunchTemplateSpecificationUpdate(testCase.flattened) - if diff := cmp.Diff(expanded, testCase.expected); diff != "" { - t.Errorf("unexpected diff (+wanted, -got): %s", diff) - } - } -} - func TestAccBatchComputeEnvironment_basic(t *testing.T) { ctx := acctest.Context(t) var ce batch.ComputeEnvironmentDetail @@ -162,7 +24,7 @@ func 
TestAccBatchComputeEnvironment_basic(t *testing.T) { serviceRoleResourceName := "aws_iam_role.batch_service" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -196,7 +58,7 @@ func TestAccBatchComputeEnvironment_disappears(t *testing.T) { resourceName := "aws_batch_compute_environment.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -220,7 +82,7 @@ func TestAccBatchComputeEnvironment_nameGenerated(t *testing.T) { resourceName := "aws_batch_compute_environment.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -249,7 +111,7 @@ func TestAccBatchComputeEnvironment_namePrefix(t *testing.T) { resourceName := "aws_batch_compute_environment.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -279,7 +141,7 @@ func 
TestAccBatchComputeEnvironment_eksConfiguration(t *testing.T) { eksClusterResourceName := "aws_eks_cluster.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, ExternalProviders: map[string]resource.ExternalProvider{ @@ -314,7 +176,7 @@ func TestAccBatchComputeEnvironment_createEC2(t *testing.T) { subnetResourceName := "aws_subnet.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -340,7 +202,6 @@ func TestAccBatchComputeEnvironment_createEC2(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "compute_resources.0.launch_template.#", "0"), resource.TestCheckResourceAttr(resourceName, "compute_resources.0.max_vcpus", "16"), resource.TestCheckResourceAttr(resourceName, "compute_resources.0.min_vcpus", "0"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.placement_group", ""), resource.TestCheckResourceAttr(resourceName, "compute_resources.0.security_group_ids.#", "1"), resource.TestCheckTypeSetElemAttrPair(resourceName, "compute_resources.0.security_group_ids.*", securityGroupResourceName, "id"), resource.TestCheckResourceAttr(resourceName, "compute_resources.0.spot_iam_fleet_role", ""), @@ -384,7 +245,7 @@ func TestAccBatchComputeEnvironment_CreateEC2DesiredVCPUsEC2KeyPairImageID_compu } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, 
ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -446,7 +307,7 @@ func TestAccBatchComputeEnvironment_createSpot(t *testing.T) { subnetResourceName := "aws_subnet.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -507,7 +368,7 @@ func TestAccBatchComputeEnvironment_CreateSpotAllocationStrategy_bidPercentage(t subnetResourceName := "aws_subnet.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -566,7 +427,7 @@ func TestAccBatchComputeEnvironment_createFargate(t *testing.T) { subnetResourceName := "aws_subnet.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -624,7 +485,7 @@ func TestAccBatchComputeEnvironment_createFargateSpot(t *testing.T) { subnetResourceName := "aws_subnet.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, 
batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -680,7 +541,7 @@ func TestAccBatchComputeEnvironment_updateState(t *testing.T) { serviceRoleResourceName := "aws_iam_role.batch_service" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -739,7 +600,7 @@ func TestAccBatchComputeEnvironment_updateServiceRole(t *testing.T) { subnetResourceName := "aws_subnet.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -831,9 +692,9 @@ func TestAccBatchComputeEnvironment_defaultServiceRole(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { - acctest.PreCheck(ctx, t) + acctest.PreCheck(t) testAccPreCheck(ctx, t) - acctest.PreCheckIAMServiceLinkedRole(ctx, t, "/aws-service-role/batch") + acctest.PreCheckIAMServiceLinkedRole(t, "/aws-service-role/batch") }, ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -865,7 +726,7 @@ func TestAccBatchComputeEnvironment_defaultServiceRole(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "compute_resources.0.tags.%", "0"), resource.TestCheckResourceAttr(resourceName, "compute_resources.0.type", "FARGATE"), resource.TestCheckResourceAttrSet(resourceName, "ecs_cluster_arn"), - acctest.MatchResourceAttrGlobalARN(resourceName, 
"service_role", "iam", regexache.MustCompile(`role/aws-service-role/batch`)), + acctest.MatchResourceAttrGlobalARN(resourceName, "service_role", "iam", regexp.MustCompile(`role/aws-service-role/batch`)), resource.TestCheckResourceAttr(resourceName, "state", "ENABLED"), resource.TestCheckResourceAttrSet(resourceName, "status"), resource.TestCheckResourceAttrSet(resourceName, "status_reason"), @@ -893,7 +754,7 @@ func TestAccBatchComputeEnvironment_ComputeResources_minVCPUs(t *testing.T) { subnetResourceName := "aws_subnet.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -1023,7 +884,7 @@ func TestAccBatchComputeEnvironment_ComputeResources_maxVCPUs(t *testing.T) { subnetResourceName := "aws_subnet.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -1154,7 +1015,7 @@ func TestAccBatchComputeEnvironment_ec2Configuration(t *testing.T) { subnetResourceName := "aws_subnet.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -1175,79 +1036,11 @@ func TestAccBatchComputeEnvironment_ec2Configuration(t *testing.T) { 
resource.TestCheckResourceAttrPair(resourceName, "compute_resources.0.instance_role", instanceProfileResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "compute_resources.0.instance_type.#", "1"), resource.TestCheckTypeSetElemAttr(resourceName, "compute_resources.0.instance_type.*", "optimal"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.ec2_configuration.#", "2"), - resource.TestCheckResourceAttrSet(resourceName, "compute_resources.0.ec2_configuration.0.image_id_override"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.ec2_configuration.0.image_type", "ECS_AL2"), - resource.TestCheckResourceAttrSet(resourceName, "compute_resources.0.ec2_configuration.1.image_id_override"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.ec2_configuration.1.image_type", "ECS_AL2_NVIDIA"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.max_vcpus", "16"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.min_vcpus", "0"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.security_group_ids.#", "1"), - resource.TestCheckTypeSetElemAttrPair(resourceName, "compute_resources.0.security_group_ids.*", securityGroupResourceName, "id"), - resource.TestCheckResourceAttrPair(resourceName, "compute_resources.0.spot_iam_fleet_role", spotFleetRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.subnets.#", "1"), - resource.TestCheckTypeSetElemAttrPair(resourceName, "compute_resources.0.subnets.*", subnetResourceName, "id"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.tags.%", "0"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.type", "SPOT"), - resource.TestCheckResourceAttrSet(resourceName, "ecs_cluster_arn"), - resource.TestCheckResourceAttrPair(resourceName, "service_role", serviceRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "state", 
"ENABLED"), - resource.TestCheckResourceAttrSet(resourceName, "status"), - resource.TestCheckResourceAttrSet(resourceName, "status_reason"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), - resource.TestCheckResourceAttr(resourceName, "type", "MANAGED"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - -func TestAccBatchComputeEnvironment_ec2ConfigurationPlacementGroup(t *testing.T) { - ctx := acctest.Context(t) - var ce batch.ComputeEnvironmentDetail - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_batch_compute_environment.test" - instanceProfileResourceName := "aws_iam_instance_profile.ecs_instance" - securityGroupResourceName := "aws_security_group.test" - serviceRoleResourceName := "aws_iam_role.batch_service" - spotFleetRoleResourceName := "aws_iam_role.ec2_spot_fleet" - subnetResourceName := "aws_subnet.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccComputeEnvironmentConfig_ec2ConfigurationPlacementGroup(rName), - Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckComputeEnvironmentExists(ctx, resourceName, &ce), - acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "batch", fmt.Sprintf("compute-environment/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "compute_environment_name", rName), - resource.TestCheckResourceAttr(resourceName, "compute_environment_name_prefix", ""), - resource.TestCheckResourceAttr(resourceName, "compute_resources.#", "1"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.allocation_strategy", ""), - resource.TestCheckResourceAttr(resourceName, 
"compute_resources.0.bid_percentage", "0"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.desired_vcpus", "0"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.ec2_key_pair", ""), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.image_id", ""), - resource.TestCheckResourceAttrPair(resourceName, "compute_resources.0.instance_role", instanceProfileResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.instance_type.#", "1"), - resource.TestCheckTypeSetElemAttr(resourceName, "compute_resources.0.instance_type.*", "optimal"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.ec2_configuration.#", "2"), + resource.TestCheckResourceAttr(resourceName, "compute_resources.0.ec2_configuration.#", "1"), resource.TestCheckResourceAttrSet(resourceName, "compute_resources.0.ec2_configuration.0.image_id_override"), resource.TestCheckResourceAttr(resourceName, "compute_resources.0.ec2_configuration.0.image_type", "ECS_AL2"), - resource.TestCheckResourceAttrSet(resourceName, "compute_resources.0.ec2_configuration.1.image_id_override"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.ec2_configuration.1.image_type", "ECS_AL2_NVIDIA"), resource.TestCheckResourceAttr(resourceName, "compute_resources.0.max_vcpus", "16"), resource.TestCheckResourceAttr(resourceName, "compute_resources.0.min_vcpus", "0"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.placement_group", rName), resource.TestCheckResourceAttr(resourceName, "compute_resources.0.security_group_ids.#", "1"), resource.TestCheckTypeSetElemAttrPair(resourceName, "compute_resources.0.security_group_ids.*", securityGroupResourceName, "id"), resource.TestCheckResourceAttrPair(resourceName, "compute_resources.0.spot_iam_fleet_role", spotFleetRoleResourceName, "arn"), @@ -1285,7 +1078,7 @@ func TestAccBatchComputeEnvironment_launchTemplate(t *testing.T) { subnetResourceName := 
"aws_subnet.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -1349,7 +1142,7 @@ func TestAccBatchComputeEnvironment_updateLaunchTemplate(t *testing.T) { subnetResourceName := "aws_subnet.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -1452,7 +1245,7 @@ func TestAccBatchComputeEnvironment_UpdateSecurityGroupsAndSubnets_fargate(t *te subnetResourceName2 := "aws_subnet.test_2" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -1542,7 +1335,7 @@ func TestAccBatchComputeEnvironment_tags(t *testing.T) { resourceName := "aws_batch_compute_environment.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), @@ -1586,180 +1379,52 @@ func TestAccBatchComputeEnvironment_createUnmanagedWithComputeResources(t *testi rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccComputeEnvironmentConfig_unmanagedResources(rName), - ExpectError: regexache.MustCompile("no `compute_resources` can be specified when `type` is \"UNMANAGED\""), + ExpectError: regexp.MustCompile("no `compute_resources` can be specified when `type` is \"UNMANAGED\""), }, }, }) } -func TestAccBatchComputeEnvironment_updateEC2(t *testing.T) { +// Test plan time errors... + +func TestAccBatchComputeEnvironment_createEC2WithoutComputeResources(t *testing.T) { ctx := acctest.Context(t) - var ce batch.ComputeEnvironmentDetail rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_batch_compute_environment.test" - instanceProfileResourceName := "aws_iam_instance_profile.ecs_instance" - updatedInstanceProfileResourceName := "aws_iam_instance_profile.ecs_instance_2" - securityGroupResourceName := "aws_security_group.test" - updatedSecurityGroupResourceName := "aws_security_group.test_2" - subnetResourceName := "aws_subnet.test" - updatedSubnetResourceName := "aws_subnet.test_2" - ec2KeyPairResourceName := "aws_key_pair.test" - launchTemplateResourceName := "aws_launch_template.test" - spotFleetRoleResourceName := "aws_iam_role.ec2_spot_fleet" - publicKey, _, err := sdkacctest.RandSSHKeyPair(acctest.DefaultEmailAddress) - if err != nil { - t.Fatalf("error generating random SSH key: %s", err) - } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, ErrorCheck: 
acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccComputeenvironmentConfig_ec2PreUpdate(rName, publicKey), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeEnvironmentExists(ctx, resourceName, &ce), - acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "batch", fmt.Sprintf("compute-environment/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "compute_environment_name", rName), - resource.TestCheckResourceAttr(resourceName, "compute_environment_name_prefix", ""), - resource.TestCheckResourceAttr(resourceName, "compute_resources.#", "1"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.allocation_strategy", "BEST_FIT_PROGRESSIVE"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.bid_percentage", "0"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.desired_vcpus", "0"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.ec2_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.ec2_configuration.0.image_type", "ECS_AL2"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.ec2_key_pair", ""), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.image_id", ""), - resource.TestCheckResourceAttrPair(resourceName, "compute_resources.0.instance_role", instanceProfileResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.instance_type.#", "1"), - resource.TestCheckTypeSetElemAttr(resourceName, "compute_resources.0.instance_type.*", "optimal"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.launch_template.#", "0"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.max_vcpus", "16"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.min_vcpus", "0"), - 
resource.TestCheckResourceAttr(resourceName, "compute_resources.0.security_group_ids.#", "1"), - resource.TestCheckTypeSetElemAttrPair(resourceName, "compute_resources.0.security_group_ids.*", securityGroupResourceName, "id"), - resource.TestCheckResourceAttrPair(resourceName, "compute_resources.0.spot_iam_fleet_role", spotFleetRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.subnets.#", "1"), - resource.TestCheckTypeSetElemAttrPair(resourceName, "compute_resources.0.subnets.*", subnetResourceName, "id"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.tags.%", "0"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.type", "EC2"), - resource.TestCheckResourceAttrSet(resourceName, "ecs_cluster_arn"), - resource.TestCheckResourceAttr(resourceName, "state", "ENABLED"), - resource.TestCheckResourceAttrSet(resourceName, "status"), - resource.TestCheckResourceAttrSet(resourceName, "status_reason"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), - resource.TestCheckResourceAttr(resourceName, "type", "MANAGED"), - ), - }, - { - Config: testAccComputeenvironmentConfig_ec2Update(rName, publicKey), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeEnvironmentExists(ctx, resourceName, &ce), - acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "batch", fmt.Sprintf("compute-environment/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "compute_environment_name", rName), - resource.TestCheckResourceAttr(resourceName, "compute_environment_name_prefix", ""), - resource.TestCheckResourceAttr(resourceName, "compute_resources.#", "1"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.allocation_strategy", "SPOT_CAPACITY_OPTIMIZED"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.bid_percentage", "100"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.desired_vcpus", "0"), - 
resource.TestCheckResourceAttr(resourceName, "compute_resources.0.ec2_configuration.#", "1"), - resource.TestCheckResourceAttrSet(resourceName, "compute_resources.0.ec2_configuration.0.image_id_override"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.ec2_configuration.0.image_type", "ECS_AL2"), - resource.TestCheckResourceAttrPair(resourceName, "compute_resources.0.ec2_key_pair", ec2KeyPairResourceName, "id"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.image_id", ""), - resource.TestCheckResourceAttrPair(resourceName, "compute_resources.0.instance_role", updatedInstanceProfileResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.instance_type.#", "1"), - resource.TestCheckTypeSetElemAttr(resourceName, "compute_resources.0.instance_type.*", "c4.large"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.launch_template.#", "1"), - resource.TestCheckResourceAttrPair(resourceName, "compute_resources.0.launch_template.0.launch_template_id", launchTemplateResourceName, "id"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.launch_template.0.version", "$Latest"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.max_vcpus", "16"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.min_vcpus", "0"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.security_group_ids.#", "1"), - resource.TestCheckTypeSetElemAttrPair(resourceName, "compute_resources.0.security_group_ids.*", updatedSecurityGroupResourceName, "id"), - resource.TestCheckResourceAttrPair(resourceName, "compute_resources.0.spot_iam_fleet_role", spotFleetRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.subnets.#", "1"), - resource.TestCheckTypeSetElemAttrPair(resourceName, "compute_resources.0.subnets.*", updatedSubnetResourceName, "id"), - resource.TestCheckResourceAttr(resourceName, 
"compute_resources.0.tags.%", "1"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.tags.updated", "yes"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.type", "SPOT"), - resource.TestCheckResourceAttrSet(resourceName, "ecs_cluster_arn"), - resource.TestCheckResourceAttr(resourceName, "state", "ENABLED"), - resource.TestCheckResourceAttrSet(resourceName, "status"), - resource.TestCheckResourceAttrSet(resourceName, "status_reason"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), - resource.TestCheckResourceAttr(resourceName, "type", "MANAGED"), - ), - }, - { - Config: testAccComputeenvironmentConfig_ec2PreUpdate(rName, publicKey), - Check: resource.ComposeTestCheckFunc( - testAccCheckComputeEnvironmentExists(ctx, resourceName, &ce), - acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "batch", fmt.Sprintf("compute-environment/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "compute_environment_name", rName), - resource.TestCheckResourceAttr(resourceName, "compute_environment_name_prefix", ""), - resource.TestCheckResourceAttr(resourceName, "compute_resources.#", "1"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.allocation_strategy", "BEST_FIT_PROGRESSIVE"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.bid_percentage", "0"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.desired_vcpus", "0"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.ec2_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.ec2_configuration.0.image_type", "ECS_AL2"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.ec2_key_pair", ""), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.image_id", ""), - resource.TestCheckResourceAttrPair(resourceName, "compute_resources.0.instance_role", instanceProfileResourceName, "arn"), - 
resource.TestCheckResourceAttr(resourceName, "compute_resources.0.instance_type.#", "1"), - resource.TestCheckTypeSetElemAttr(resourceName, "compute_resources.0.instance_type.*", "optimal"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.launch_template.#", "0"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.max_vcpus", "16"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.min_vcpus", "0"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.security_group_ids.#", "1"), - resource.TestCheckTypeSetElemAttrPair(resourceName, "compute_resources.0.security_group_ids.*", securityGroupResourceName, "id"), - resource.TestCheckResourceAttrPair(resourceName, "compute_resources.0.spot_iam_fleet_role", spotFleetRoleResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.subnets.#", "1"), - resource.TestCheckTypeSetElemAttrPair(resourceName, "compute_resources.0.subnets.*", subnetResourceName, "id"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.tags.%", "0"), - resource.TestCheckResourceAttr(resourceName, "compute_resources.0.type", "EC2"), - resource.TestCheckResourceAttrSet(resourceName, "ecs_cluster_arn"), - resource.TestCheckResourceAttr(resourceName, "state", "ENABLED"), - resource.TestCheckResourceAttrSet(resourceName, "status"), - resource.TestCheckResourceAttrSet(resourceName, "status_reason"), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), - resource.TestCheckResourceAttr(resourceName, "type", "MANAGED"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, + Config: testAccComputeEnvironmentConfig_ec2NoResources(rName), + ExpectError: regexp.MustCompile(`computeResources must be provided for a MANAGED compute environment`), }, }, }) } -// Test plan time errors... 
- -func TestAccBatchComputeEnvironment_createEC2WithoutComputeResources(t *testing.T) { +func TestAccBatchComputeEnvironment_createSpotWithoutIAMFleetRole(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccComputeEnvironmentConfig_ec2NoResources(rName), - ExpectError: regexache.MustCompile(`computeResources must be provided for a MANAGED compute environment`), + Config: testAccComputeEnvironmentConfig_spotNoIAMFleetRole(rName), + ExpectError: regexp.MustCompile(`ComputeResources.spotIamFleetRole cannot not be null or empty`), }, }, }) @@ -1767,7 +1432,7 @@ func TestAccBatchComputeEnvironment_createEC2WithoutComputeResources(t *testing. 
func testAccCheckComputeEnvironmentDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).BatchConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).BatchConn() for _, rs := range s.RootModule().Resources { if rs.Type != "aws_batch_compute_environment" { @@ -1801,22 +1466,22 @@ func testAccCheckComputeEnvironmentExists(ctx context.Context, n string, v *batc return fmt.Errorf("No Batch Compute Environment ID is set") } - conn := acctest.Provider.Meta().(*conns.AWSClient).BatchConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).BatchConn() - output, err := tfbatch.FindComputeEnvironmentDetailByName(ctx, conn, rs.Primary.ID) + computeEnvironment, err := tfbatch.FindComputeEnvironmentDetailByName(ctx, conn, rs.Primary.ID) if err != nil { return err } - *v = *output + *v = *computeEnvironment return nil } } func testAccPreCheck(ctx context.Context, t *testing.T) { - conn := acctest.Provider.Meta().(*conns.AWSClient).BatchConn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).BatchConn() input := &batch.DescribeComputeEnvironmentsInput{} @@ -2649,6 +2314,34 @@ resource "aws_batch_compute_environment" "test" { `, rName)) } +func testAccComputeEnvironmentConfig_spotNoIAMFleetRole(rName string) string { + return acctest.ConfigCompose(testAccComputeEnvironmentConfig_base(rName), fmt.Sprintf(` +resource "aws_batch_compute_environment" "test" { + compute_environment_name = %[1]q + + compute_resources { + instance_role = aws_iam_instance_profile.ecs_instance.arn + instance_type = [ + "c4.large", + ] + max_vcpus = 16 + min_vcpus = 0 + security_group_ids = [ + aws_security_group.test.id + ] + subnets = [ + aws_subnet.test.id + ] + type = "SPOT" + } + + service_role = aws_iam_role.batch_service.arn + type = "MANAGED" + depends_on = [aws_iam_role_policy_attachment.batch_service] +} +`, rName)) +} + func testAccComputeEnvironmentConfig_launchTemplate(rName string) 
string { return acctest.ConfigCompose(testAccComputeEnvironmentConfig_base(rName), fmt.Sprintf(` resource "aws_launch_template" "test" { @@ -2768,66 +2461,14 @@ resource "aws_batch_compute_environment" "test" { compute_resources { instance_role = aws_iam_instance_profile.ecs_instance.arn instance_type = ["optimal"] - - ec2_configuration { - image_id_override = data.aws_ami.amzn-ami-minimal-hvm-ebs.id - image_type = "ECS_AL2" - } - - ec2_configuration { - image_id_override = data.aws_ami.amzn-ami-minimal-hvm-ebs.id - image_type = "ECS_AL2_NVIDIA" - } - - max_vcpus = 16 - min_vcpus = 0 - - security_group_ids = [ - aws_security_group.test.id - ] - spot_iam_fleet_role = aws_iam_role.ec2_spot_fleet.arn - subnets = [ - aws_subnet.test.id - ] - type = "SPOT" - } - - service_role = aws_iam_role.batch_service.arn - type = "MANAGED" - depends_on = [aws_iam_role_policy_attachment.batch_service] -} -`, rName)) -} - -func testAccComputeEnvironmentConfig_ec2ConfigurationPlacementGroup(rName string) string { - return acctest.ConfigCompose(testAccComputeEnvironmentConfig_base(rName), acctest.ConfigLatestAmazonLinuxHVMEBSAMI(), fmt.Sprintf(` -resource "aws_placement_group" "test" { - name = %[1]q - strategy = "cluster" -} - -resource "aws_batch_compute_environment" "test" { - compute_environment_name = %[1]q - - compute_resources { - instance_role = aws_iam_instance_profile.ecs_instance.arn - instance_type = ["optimal"] - ec2_configuration { image_id_override = data.aws_ami.amzn-ami-minimal-hvm-ebs.id image_type = "ECS_AL2" } - ec2_configuration { - image_id_override = data.aws_ami.amzn-ami-minimal-hvm-ebs.id - image_type = "ECS_AL2_NVIDIA" - } - max_vcpus = 16 min_vcpus = 0 - placement_group = aws_placement_group.test.name - security_group_ids = [ aws_security_group.test.id ] @@ -2844,135 +2485,3 @@ resource "aws_batch_compute_environment" "test" { } `, rName)) } - -func testAccComputeEnvironmentConfig_baseForUpdates(rName string, publicKey string) string { - return fmt.Sprintf(` 
-resource "aws_iam_role" "ecs_instance_2" { - name = "%[1]s_ecs_instance_2" - - assume_role_policy = < Date: Mon, 30 Oct 2023 13:17:26 -0400 Subject: [PATCH 031/438] add updatePolicy attribute/param --- internal/service/batch/compute_environment.go | 632 ++++++++++-- .../batch/compute_environment_data_source.go | 28 +- .../compute_environment_data_source_test.go | 103 +- .../service/batch/compute_environment_test.go | 972 ++++++++++++++++-- .../d/batch_compute_environment.html.markdown | 7 +- .../r/batch_compute_environment.html.markdown | 124 ++- 6 files changed, 1582 insertions(+), 284 deletions(-) diff --git a/internal/service/batch/compute_environment.go b/internal/service/batch/compute_environment.go index 34c4a972716..2af0598d449 100644 --- a/internal/service/batch/compute_environment.go +++ b/internal/service/batch/compute_environment.go @@ -1,15 +1,22 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package batch import ( "context" + "errors" "fmt" "log" "strings" + "time" + "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/batch" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/customdiff" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -19,8 +26,11 @@ import ( tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" + "github.com/hashicorp/terraform-provider-aws/names" ) +// @SDKResource("aws_batch_compute_environment", name="Compute Environment") +// @Tags(identifierAttribute="arn") func ResourceComputeEnvironment() *schema.Resource { return &schema.Resource{ CreateWithoutTimeout: 
resourceComputeEnvironmentCreate, @@ -69,7 +79,6 @@ func ResourceComputeEnvironment() *schema.Resource { "allocation_strategy": { Type: schema.TypeString, Optional: true, - ForceNew: true, StateFunc: func(val interface{}) string { return strings.ToUpper(val.(string)) }, @@ -78,7 +87,6 @@ func ResourceComputeEnvironment() *schema.Resource { "bid_percentage": { Type: schema.TypeInt, Optional: true, - ForceNew: true, }, "desired_vcpus": { Type: schema.TypeInt, @@ -90,20 +98,18 @@ func ResourceComputeEnvironment() *schema.Resource { Optional: true, Computed: true, ForceNew: true, - MaxItems: 1, + MaxItems: 2, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "image_id_override": { Type: schema.TypeString, Optional: true, Computed: true, - ForceNew: true, ValidateFunc: validation.StringLenBetween(1, 256), }, "image_type": { Type: schema.TypeString, Optional: true, - ForceNew: true, ValidateFunc: validation.StringLenBetween(1, 256), }, }, @@ -112,23 +118,19 @@ func ResourceComputeEnvironment() *schema.Resource { "ec2_key_pair": { Type: schema.TypeString, Optional: true, - ForceNew: true, }, "image_id": { Type: schema.TypeString, Optional: true, - ForceNew: true, }, "instance_role": { Type: schema.TypeString, Optional: true, - ForceNew: true, ValidateFunc: verify.ValidARN, }, "instance_type": { Type: schema.TypeSet, Optional: true, - ForceNew: true, Elem: &schema.Schema{Type: schema.TypeString}, }, "launch_template": { @@ -141,19 +143,17 @@ func ResourceComputeEnvironment() *schema.Resource { "launch_template_id": { Type: schema.TypeString, Optional: true, - ForceNew: true, ConflictsWith: []string{"compute_resources.0.launch_template.0.launch_template_name"}, }, "launch_template_name": { Type: schema.TypeString, Optional: true, - ForceNew: true, ConflictsWith: []string{"compute_resources.0.launch_template.0.launch_template_id"}, }, "version": { Type: schema.TypeString, Optional: true, - ForceNew: true, + Computed: true, }, }, }, @@ -166,6 +166,11 @@ func 
ResourceComputeEnvironment() *schema.Resource { Type: schema.TypeInt, Optional: true, }, + "placement_group": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, "security_group_ids": { Type: schema.TypeSet, Optional: true, @@ -182,11 +187,10 @@ func ResourceComputeEnvironment() *schema.Resource { Required: true, Elem: &schema.Schema{Type: schema.TypeString}, }, - "tags": tftags.TagsSchemaForceNew(), + "tags": tftags.TagsSchema(), "type": { Type: schema.TypeString, Required: true, - ForceNew: true, StateFunc: func(val interface{}) string { return strings.ToUpper(val.(string)) }, @@ -244,8 +248,8 @@ func ResourceComputeEnvironment() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "tags": tftags.TagsSchema(), - "tags_all": tftags.TagsSchemaComputed(), + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), "type": { Type: schema.TypeString, Required: true, @@ -255,27 +259,42 @@ func ResourceComputeEnvironment() *schema.Resource { }, ValidateFunc: validation.StringInSlice(batch.CEType_Values(), true), }, + "update_policy": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "job_execution_timeout_minutes": { + Type: schema.TypeInt, + Required: true, + }, + "terminate_jobs_on_update": { + Type: schema.TypeBool, + Required: true, + }, + }, + }, + }, }, } } func resourceComputeEnvironmentCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BatchConn() - defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig - tags := defaultTagsConfig.MergeTags(tftags.New(d.Get("tags").(map[string]interface{}))) + conn := meta.(*conns.AWSClient).BatchConn(ctx) computeEnvironmentName := create.Name(d.Get("compute_environment_name").(string), d.Get("compute_environment_name_prefix").(string)) computeEnvironmentType := d.Get("type").(string) - input 
:= &batch.CreateComputeEnvironmentInput{ ComputeEnvironmentName: aws.String(computeEnvironmentName), ServiceRole: aws.String(d.Get("service_role").(string)), + Tags: getTagsIn(ctx), Type: aws.String(computeEnvironmentType), } if v, ok := d.GetOk("compute_resources"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - input.ComputeResources = expandComputeResource(v.([]interface{})[0].(map[string]interface{})) + input.ComputeResources = expandComputeResource(ctx, v.([]interface{})[0].(map[string]interface{})) } if v, ok := d.GetOk("eks_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { @@ -286,11 +305,6 @@ func resourceComputeEnvironmentCreate(ctx context.Context, d *schema.ResourceDat input.State = aws.String(v.(string)) } - if len(tags) > 0 { - input.Tags = Tags(tags.IgnoreAWS()) - } - - log.Printf("[DEBUG] Creating Batch Compute Environment: %s", input) output, err := conn.CreateComputeEnvironmentWithContext(ctx, input) if err != nil { @@ -303,16 +317,31 @@ func resourceComputeEnvironmentCreate(ctx context.Context, d *schema.ResourceDat return sdkdiag.AppendErrorf(diags, "waiting for Batch Compute Environment (%s) create: %s", d.Id(), err) } + // UpdatePolicy is not possible to set with CreateComputeEnvironment + if v, ok := d.GetOk("update_policy"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + inputeUpdateOnCreate := &batch.UpdateComputeEnvironmentInput{ + ComputeEnvironment: aws.String(d.Id()), + UpdatePolicy: expandComputeEnvironmentUpdatePolicy(v.([]interface{})), + } + log.Printf("[DEBUG] Creating Batch Compute Environment extra arguments: %s", input) + + if _, err := conn.UpdateComputeEnvironmentWithContext(ctx, inputeUpdateOnCreate); err != nil { + return sdkdiag.AppendErrorf(diags, "Create Batch Compute Environment extra arguments through UpdateComputeEnvironment (%s): %s", d.Id(), err) + } + + if _, err := waitComputeEnvironmentUpdated(ctx, conn, d.Id(), 
d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "Create waiting for Batch Compute Environment (%s) extra arguments through UpdateComputeEnvironment: %s", d.Id(), err) + } + } + return append(diags, resourceComputeEnvironmentRead(ctx, d, meta)...) } func resourceComputeEnvironmentRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BatchConn() - defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig - ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig + conn := meta.(*conns.AWSClient).BatchConn(ctx) - computeEnvironment, err := FindComputeEnvironmentDetailByName(ctx, conn, d.Id()) + computeEnvironment, err := findComputeEnvironmentDetailByName(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] Batch Compute Environment (%s) not found, removing from state", d.Id()) @@ -329,21 +358,14 @@ func resourceComputeEnvironmentRead(ctx context.Context, d *schema.ResourceData, d.Set("arn", computeEnvironment.ComputeEnvironmentArn) d.Set("compute_environment_name", computeEnvironment.ComputeEnvironmentName) d.Set("compute_environment_name_prefix", create.NamePrefixFromName(aws.StringValue(computeEnvironment.ComputeEnvironmentName))) - d.Set("ecs_cluster_arn", computeEnvironment.EcsClusterArn) - d.Set("service_role", computeEnvironment.ServiceRole) - d.Set("state", computeEnvironment.State) - d.Set("status", computeEnvironment.Status) - d.Set("status_reason", computeEnvironment.StatusReason) - d.Set("type", computeEnvironmentType) - if computeEnvironment.ComputeResources != nil { - if err := d.Set("compute_resources", []interface{}{flattenComputeResource(computeEnvironment.ComputeResources)}); err != nil { + if err := d.Set("compute_resources", []interface{}{flattenComputeResource(ctx, computeEnvironment.ComputeResources)}); err != nil { return sdkdiag.AppendErrorf(diags, "setting 
compute_resources: %s", err) } } else { d.Set("compute_resources", nil) } - + d.Set("ecs_cluster_arn", computeEnvironment.EcsClusterArn) if computeEnvironment.EksConfiguration != nil { if err := d.Set("eks_configuration", []interface{}{flattenEKSConfiguration(computeEnvironment.EksConfiguration)}); err != nil { return sdkdiag.AppendErrorf(diags, "setting eks_configuration: %s", err) @@ -351,24 +373,24 @@ func resourceComputeEnvironmentRead(ctx context.Context, d *schema.ResourceData, } else { d.Set("eks_configuration", nil) } + d.Set("service_role", computeEnvironment.ServiceRole) + d.Set("state", computeEnvironment.State) + d.Set("status", computeEnvironment.Status) + d.Set("status_reason", computeEnvironment.StatusReason) + d.Set("type", computeEnvironmentType) - tags := KeyValueTags(computeEnvironment.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig) - - //lintignore:AWSR002 - if err := d.Set("tags", tags.RemoveDefaultConfig(defaultTagsConfig).Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) + if err := d.Set("update_policy", flattenComputeEnvironmentUpdatePolicy(computeEnvironment.UpdatePolicy)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting update_policy: %s", err) } - if err := d.Set("tags_all", tags.Map()); err != nil { - return sdkdiag.AppendErrorf(diags, "setting tags_all: %s", err) - } + setTagsOut(ctx, computeEnvironment.Tags) return diags } func resourceComputeEnvironmentUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BatchConn() + conn := meta.(*conns.AWSClient).BatchConn(ctx) if d.HasChangesExcept("tags", "tags_all") { input := &batch.UpdateComputeEnvironmentInput{ @@ -383,20 +405,16 @@ func resourceComputeEnvironmentUpdate(ctx context.Context, d *schema.ResourceDat input.State = aws.String(d.Get("state").(string)) } + if d.HasChange("update_policy") { + input.UpdatePolicy = 
expandComputeEnvironmentUpdatePolicy(d.Get("update_policy").([]interface{})) + } + if computeEnvironmentType := strings.ToUpper(d.Get("type").(string)); computeEnvironmentType == batch.CETypeManaged { // "At least one compute-resources attribute must be specified" computeResourceUpdate := &batch.ComputeResourceUpdate{ MaxvCpus: aws.Int64(int64(d.Get("compute_resources.0.max_vcpus").(int))), } - if d.HasChange("compute_resources.0.desired_vcpus") { - computeResourceUpdate.DesiredvCpus = aws.Int64(int64(d.Get("compute_resources.0.desired_vcpus").(int))) - } - - if d.HasChange("compute_resources.0.min_vcpus") { - computeResourceUpdate.MinvCpus = aws.Int64(int64(d.Get("compute_resources.0.min_vcpus").(int))) - } - if d.HasChange("compute_resources.0.security_group_ids") { computeResourceUpdate.SecurityGroupIds = flex.ExpandStringSet(d.Get("compute_resources.0.security_group_ids").(*schema.Set)) } @@ -405,6 +423,96 @@ func resourceComputeEnvironmentUpdate(ctx context.Context, d *schema.ResourceDat computeResourceUpdate.Subnets = flex.ExpandStringSet(d.Get("compute_resources.0.subnets").(*schema.Set)) } + if d.HasChange("compute_resources.0.allocation_strategy") { + if allocationStrategy, ok := d.GetOk("compute_resources.0.allocation_strategy"); ok { + computeResourceUpdate.AllocationStrategy = aws.String(allocationStrategy.(string)) + } else { + computeResourceUpdate.AllocationStrategy = aws.String("") + } + } + + computeResourceEnvironmentType := d.Get("compute_resources.0.type").(string) + + if d.HasChange("compute_resources.0.type") { + computeResourceUpdate.Type = aws.String(computeResourceEnvironmentType) + } + + if !isFargateType(computeResourceEnvironmentType) { + if d.HasChange("compute_resources.0.desired_vcpus") { + if desiredvCpus, ok := d.GetOk("compute_resources.0.desired_vcpus"); ok { + computeResourceUpdate.DesiredvCpus = aws.Int64(int64(desiredvCpus.(int))) + } else { + computeResourceUpdate.DesiredvCpus = aws.Int64(0) + } + } + + if 
d.HasChange("compute_resources.0.min_vcpus") { + if minVcpus, ok := d.GetOk("compute_resources.0.min_vcpus"); ok { + computeResourceUpdate.MinvCpus = aws.Int64(int64(minVcpus.(int))) + } else { + computeResourceUpdate.MinvCpus = aws.Int64(0) + } + } + + if d.HasChange("compute_resources.0.bid_percentage") { + if bidPercentage, ok := d.GetOk("compute_resources.0.bid_percentage"); ok { + computeResourceUpdate.BidPercentage = aws.Int64(int64(bidPercentage.(int))) + } else { + computeResourceUpdate.BidPercentage = aws.Int64(0) + } + } + + if d.HasChange("compute_resources.0.ec2_configuration") { + defaultImageType := "ECS_AL2" + if _, ok := d.GetOk("eks_configuration.#"); ok { + defaultImageType = "EKS_AL2" + } + ec2Configuration := d.Get("compute_resources.0.ec2_configuration").([]interface{}) + computeResourceUpdate.Ec2Configuration = expandEC2ConfigurationsUpdate(ec2Configuration, defaultImageType) + } + + if d.HasChange("compute_resources.0.ec2_key_pair") { + if keyPair, ok := d.GetOk("compute_resources.0.ec2_key_pair"); ok { + computeResourceUpdate.Ec2KeyPair = aws.String(keyPair.(string)) + } else { + computeResourceUpdate.Ec2KeyPair = aws.String("") + } + } + + if d.HasChange("compute_resources.0.image_id") { + if imageId, ok := d.GetOk("compute_resources.0.image_id"); ok { + computeResourceUpdate.ImageId = aws.String(imageId.(string)) + } else { + computeResourceUpdate.ImageId = aws.String("") + } + } + + if d.HasChange("compute_resources.0.instance_role") { + if instanceRole, ok := d.GetOk("compute_resources.0.instance_role"); ok { + computeResourceUpdate.InstanceRole = aws.String(instanceRole.(string)) + } else { + computeResourceUpdate.InstanceRole = aws.String("") + } + } + + if d.HasChange("compute_resources.0.instance_type") { + computeResourceUpdate.InstanceTypes = flex.ExpandStringSet(d.Get("compute_resources.0.instance_type").(*schema.Set)) + } + + if d.HasChange("compute_resources.0.launch_template") { + launchTemplate := 
d.Get("compute_resources.0.launch_template").([]interface{}) + computeResourceUpdate.LaunchTemplate = expandLaunchTemplateSpecificationUpdate(launchTemplate) + } + + if d.HasChange("compute_resources.0.tags") { + if tags, ok := d.GetOk("compute_resources.0.tags"); ok { + computeResourceUpdate.Tags = Tags(tftags.New(ctx, tags.(map[string]interface{})).IgnoreAWS()) + } else { + computeResourceUpdate.Tags = aws.StringMap(map[string]string{}) + } + } + } + input.ComputeResources = computeResourceUpdate } @@ -418,20 +526,12 @@ func resourceComputeEnvironmentUpdate(ctx context.Context, d *schema.ResourceDat } } - if d.HasChange("tags_all") { - o, n := d.GetChange("tags_all") - - if err := UpdateTags(ctx, conn, d.Get("arn").(string), o, n); err != nil { - return sdkdiag.AppendErrorf(diags, "updating tags: %s", err) - } - } - return append(diags, resourceComputeEnvironmentRead(ctx, d, meta)...) } func resourceComputeEnvironmentDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BatchConn() + conn := meta.(*conns.AWSClient).BatchConn(ctx) log.Printf("[DEBUG] Disabling Batch Compute Environment: %s", d.Id()) { @@ -478,21 +578,103 @@ func resourceComputeEnvironmentCustomizeDiff(_ context.Context, diff *schema.Res if diff.Id() != "" { // Update. 
- computeResourceType := strings.ToUpper(diff.Get("compute_resources.0.type").(string)) - fargateComputeResources := false - if computeResourceType == batch.CRTypeFargate || computeResourceType == batch.CRTypeFargateSpot { - fargateComputeResources = true - } + fargateComputeResources := isFargateType(diff.Get("compute_resources.0.type").(string)) - if diff.HasChange("compute_resources.0.security_group_ids") && !fargateComputeResources { - if err := diff.ForceNew("compute_resources.0.security_group_ids"); err != nil { - return err + if !isUpdatableComputeEnvironment(diff) { + if diff.HasChange("compute_resources.0.security_group_ids") && !fargateComputeResources { + if err := diff.ForceNew("compute_resources.0.security_group_ids"); err != nil { + return err + } + } + + if diff.HasChange("compute_resources.0.subnets") && !fargateComputeResources { + if err := diff.ForceNew("compute_resources.0.subnets"); err != nil { + return err + } + } + + if diff.HasChange("compute_resources.0.allocation_strategy") { + if err := diff.ForceNew("compute_resources.0.allocation_strategy"); err != nil { + return err + } + } + + if diff.HasChange("compute_resources.0.bid_percentage") { + if err := diff.ForceNew("compute_resources.0.bid_percentage"); err != nil { + return err + } + } + + if diff.HasChange("compute_resources.0.ec2_configuration.#") { + if err := diff.ForceNew("compute_resources.0.ec2_configuration.#"); err != nil { + return err + } + } + + if diff.HasChange("compute_resources.0.ec2_configuration.0.image_id_override") { + if err := diff.ForceNew("compute_resources.0.ec2_configuration.0.image_id_override"); err != nil { + return err + } + } + + if diff.HasChange("compute_resources.0.ec2_configuration.0.image_type") { + if err := diff.ForceNew("compute_resources.0.ec2_configuration.0.image_type"); err != nil { + return err + } + } + + if diff.HasChange("compute_resources.0.ec2_key_pair") { + if err := diff.ForceNew("compute_resources.0.ec2_key_pair"); err != nil { + return 
err + } + } + + if diff.HasChange("compute_resources.0.image_id") { + if err := diff.ForceNew("compute_resources.0.image_id"); err != nil { + return err + } + } + + if diff.HasChange("compute_resources.0.instance_role") { + if err := diff.ForceNew("compute_resources.0.instance_role"); err != nil { + return err + } + } + + if diff.HasChange("compute_resources.0.instance_type") { + if err := diff.ForceNew("compute_resources.0.instance_type"); err != nil { + return err + } } - } - if diff.HasChange("compute_resources.0.subnets") && !fargateComputeResources { - if err := diff.ForceNew("compute_resources.0.subnets"); err != nil { - return err + if diff.HasChange("compute_resources.0.launch_template.#") { + if err := diff.ForceNew("compute_resources.0.launch_template.#"); err != nil { + return err + } + } + + if diff.HasChange("compute_resources.0.launch_template.0.launch_template_id") { + if err := diff.ForceNew("compute_resources.0.launch_template.0.launch_template_id"); err != nil { + return err + } + } + + if diff.HasChange("compute_resources.0.launch_template.0.launch_template_name") { + if err := diff.ForceNew("compute_resources.0.launch_template.0.launch_template_name"); err != nil { + return err + } + } + + if diff.HasChange("compute_resources.0.launch_template.0.version") { + if err := diff.ForceNew("compute_resources.0.launch_template.0.version"); err != nil { + return err + } + } + + if diff.HasChange("compute_resources.0.tags") { + if err := diff.ForceNew("compute_resources.0.tags"); err != nil { + return err + } } } } @@ -500,7 +682,199 @@ func resourceComputeEnvironmentCustomizeDiff(_ context.Context, diff *schema.Res return nil } -func expandComputeResource(tfMap map[string]interface{}) *batch.ComputeResource { +func findComputeEnvironmentDetailByName(ctx context.Context, conn *batch.Batch, name string) (*batch.ComputeEnvironmentDetail, error) { + input := &batch.DescribeComputeEnvironmentsInput{ + ComputeEnvironments: aws.StringSlice([]string{name}), + } 
+ + output, err := findComputeEnvironmentDetail(ctx, conn, input) + + if err != nil { + return nil, err + } + + if status := aws.StringValue(output.Status); status == batch.CEStatusDeleted { + return nil, &retry.NotFoundError{ + Message: status, + LastRequest: input, + } + } + + return output, nil +} + +func findComputeEnvironmentDetail(ctx context.Context, conn *batch.Batch, input *batch.DescribeComputeEnvironmentsInput) (*batch.ComputeEnvironmentDetail, error) { + output, err := conn.DescribeComputeEnvironmentsWithContext(ctx, input) + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return tfresource.AssertSinglePtrResult(output.ComputeEnvironments) +} + +func statusComputeEnvironment(ctx context.Context, conn *batch.Batch, name string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + computeEnvironmentDetail, err := findComputeEnvironmentDetailByName(ctx, conn, name) + + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return computeEnvironmentDetail, aws.StringValue(computeEnvironmentDetail.Status), nil + } +} + +func waitComputeEnvironmentCreated(ctx context.Context, conn *batch.Batch, name string, timeout time.Duration) (*batch.ComputeEnvironmentDetail, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{batch.CEStatusCreating}, + Target: []string{batch.CEStatusValid}, + Refresh: statusComputeEnvironment(ctx, conn, name), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*batch.ComputeEnvironmentDetail); ok { + if status := aws.StringValue(output.Status); status == batch.CEStatusInvalid { + tfresource.SetLastError(err, errors.New(aws.StringValue(output.StatusReason))) + } + + return output, err + } + + return nil, err +} + +func waitComputeEnvironmentDeleted(ctx context.Context, conn *batch.Batch, name string, timeout 
time.Duration) (*batch.ComputeEnvironmentDetail, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{batch.CEStatusDeleting}, + Target: []string{}, + Refresh: statusComputeEnvironment(ctx, conn, name), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*batch.ComputeEnvironmentDetail); ok { + if status := aws.StringValue(output.Status); status == batch.CEStatusInvalid { + tfresource.SetLastError(err, errors.New(aws.StringValue(output.StatusReason))) + } + + return output, err + } + + return nil, err +} + +func waitComputeEnvironmentDisabled(ctx context.Context, conn *batch.Batch, name string, timeout time.Duration) (*batch.ComputeEnvironmentDetail, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{batch.CEStatusUpdating}, + Target: []string{batch.CEStatusValid}, + Refresh: statusComputeEnvironment(ctx, conn, name), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*batch.ComputeEnvironmentDetail); ok { + if status := aws.StringValue(output.Status); status == batch.CEStatusInvalid { + tfresource.SetLastError(err, errors.New(aws.StringValue(output.StatusReason))) + } + + return output, err + } + + return nil, err +} + +func waitComputeEnvironmentUpdated(ctx context.Context, conn *batch.Batch, name string, timeout time.Duration) (*batch.ComputeEnvironmentDetail, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{batch.CEStatusUpdating}, + Target: []string{batch.CEStatusValid}, + Refresh: statusComputeEnvironment(ctx, conn, name), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if v, ok := outputRaw.(*batch.ComputeEnvironmentDetail); ok { + return v, err + } + + return nil, err +} + +func isFargateType(computeResourceType string) bool { + if computeResourceType == batch.CRTypeFargate || computeResourceType == batch.CRTypeFargateSpot { + return true + } + 
return false +} + +func isUpdatableComputeEnvironment(diff *schema.ResourceDiff) bool { + if !isServiceLinkedRoleDiff(diff) { + return false + } + if !isUpdatableAllocationStrategyDiff(diff) { + return false + } + return true +} + +func isServiceLinkedRoleDiff(diff *schema.ResourceDiff) bool { + var before, after string + if diff.HasChange("service_role") { + beforeRaw, afterRaw := diff.GetChange("service_role") + before, _ = beforeRaw.(string) + after, _ := afterRaw.(string) + return isServiceLinkedRole(before) && isServiceLinkedRole(after) + } + afterRaw, _ := diff.GetOk("service_role") + after, _ = afterRaw.(string) + return isServiceLinkedRole(after) +} + +func isServiceLinkedRole(roleArn string) bool { + if roleArn == "" { + // Empty role ARN defaults to AWS service-linked role + return true + } + re := regexache.MustCompile(`arn:[^:]+:iam::\d{12}:role/aws-service-role/batch\.amazonaws\.com/*`) + return re.MatchString(roleArn) +} + +func isUpdatableAllocationStrategyDiff(diff *schema.ResourceDiff) bool { + var before, after string + if computeResourcesCount, ok := diff.Get("compute_resources.#").(int); ok { + if computeResourcesCount > 0 { + if diff.HasChange("compute_resources.0.allocation_strategy") { + beforeRaw, afterRaw := diff.GetChange("compute_resources.0.allocation_strategy") + before, _ = beforeRaw.(string) + after, _ = afterRaw.(string) + return isUpdatableAllocationStrategy(before) && isUpdatableAllocationStrategy(after) + } + afterRaw, _ := diff.GetOk("compute_resources.0.allocation_strategy") + after, _ := afterRaw.(string) + return isUpdatableAllocationStrategy(after) + } + } + return false +} + +func isUpdatableAllocationStrategy(allocationStrategy string) bool { + return allocationStrategy == batch.CRAllocationStrategyBestFitProgressive || allocationStrategy == batch.CRAllocationStrategySpotCapacityOptimized +} + +func expandComputeResource(ctx context.Context, tfMap map[string]interface{}) *batch.ComputeResource { if tfMap == nil { return nil 
} @@ -545,7 +919,7 @@ func expandComputeResource(tfMap map[string]interface{}) *batch.ComputeResource apiObject.InstanceTypes = flex.ExpandStringSet(v) } - if v, ok := tfMap["launch_template"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["launch_template"].([]interface{}); ok && len(v) > 0 && v[0] != nil { apiObject.LaunchTemplate = expandLaunchTemplateSpecification(v[0].(map[string]interface{})) } @@ -559,6 +933,10 @@ func expandComputeResource(tfMap map[string]interface{}) *batch.ComputeResource apiObject.MinvCpus = aws.Int64(0) } + if v, ok := tfMap["placement_group"].(string); ok && v != "" { + apiObject.PlacementGroup = aws.String(v) + } + if v, ok := tfMap["security_group_ids"].(*schema.Set); ok && v.Len() > 0 { apiObject.SecurityGroupIds = flex.ExpandStringSet(v) } @@ -572,7 +950,7 @@ func expandComputeResource(tfMap map[string]interface{}) *batch.ComputeResource } if v, ok := tfMap["tags"].(map[string]interface{}); ok && len(v) > 0 { - apiObject.Tags = Tags(tftags.New(v).IgnoreAWS()) + apiObject.Tags = Tags(tftags.New(ctx, v).IgnoreAWS()) } if computeResourceType != "" { @@ -666,7 +1044,65 @@ func expandLaunchTemplateSpecification(tfMap map[string]interface{}) *batch.Laun return apiObject } -func flattenComputeResource(apiObject *batch.ComputeResource) map[string]interface{} { +func expandEC2ConfigurationsUpdate(tfList []interface{}, defaultImageType string) []*batch.Ec2Configuration { + if len(tfList) == 0 { + return []*batch.Ec2Configuration{ + { + ImageType: aws.String(defaultImageType), + }, + } + } + + var apiObjects []*batch.Ec2Configuration + + for _, tfMapRaw := range tfList { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + apiObject := expandEC2Configuration(tfMap) + + if apiObject == nil { + continue + } + + apiObjects = append(apiObjects, apiObject) + } + + return apiObjects +} + +func expandLaunchTemplateSpecificationUpdate(tfList []interface{}) *batch.LaunchTemplateSpecification { + if len(tfList) == 
0 || tfList[0] == nil { + // delete any existing launch template configuration + return &batch.LaunchTemplateSpecification{ + LaunchTemplateId: aws.String(""), + } + } + + tfMap := tfList[0].(map[string]interface{}) + apiObject := &batch.LaunchTemplateSpecification{} + + if v, ok := tfMap["launch_template_id"].(string); ok && v != "" { + apiObject.LaunchTemplateId = aws.String(v) + } + + if v, ok := tfMap["launch_template_name"].(string); ok && v != "" { + apiObject.LaunchTemplateName = aws.String(v) + } + + if v, ok := tfMap["version"].(string); ok { + apiObject.Version = aws.String(v) + } else { + apiObject.Version = aws.String("") + } + + return apiObject +} + +func flattenComputeResource(ctx context.Context, apiObject *batch.ComputeResource) map[string]interface{} { if apiObject == nil { return nil } @@ -717,6 +1153,10 @@ func flattenComputeResource(apiObject *batch.ComputeResource) map[string]interfa tfMap["min_vcpus"] = aws.Int64Value(v) } + if v := apiObject.PlacementGroup; v != nil { + tfMap["placement_group"] = aws.StringValue(v) + } + if v := apiObject.SecurityGroupIds; v != nil { tfMap["security_group_ids"] = aws.StringValueSlice(v) } @@ -730,7 +1170,7 @@ func flattenComputeResource(apiObject *batch.ComputeResource) map[string]interfa } if v := apiObject.Tags; v != nil { - tfMap["tags"] = KeyValueTags(v).IgnoreAWS().Map() + tfMap["tags"] = KeyValueTags(ctx, v).IgnoreAWS().Map() } if v := apiObject.Type; v != nil { @@ -815,3 +1255,31 @@ func flattenLaunchTemplateSpecification(apiObject *batch.LaunchTemplateSpecifica return tfMap } + +func expandComputeEnvironmentUpdatePolicy(l []interface{}) *batch.UpdatePolicy { + if len(l) == 0 || l[0] == nil { + return nil + } + + m := l[0].(map[string]interface{}) + + up := &batch.UpdatePolicy{ + JobExecutionTimeoutMinutes: aws.Int64(int64(m["job_execution_timeout_minutes"].(int))), + TerminateJobsOnUpdate: aws.Bool(bool(m["terminate_jobs_on_update"].(bool))), + } + + return up +} + +func 
flattenComputeEnvironmentUpdatePolicy(up *batch.UpdatePolicy) []interface{} { + if up == nil { + return []interface{}{} + } + + m := map[string]interface{}{ + "job_execution_timeout_minutes": aws.Int64Value(up.JobExecutionTimeoutMinutes), + "terminate_jobs_on_update": aws.BoolValue(up.TerminateJobsOnUpdate), + } + + return []interface{}{m} +} diff --git a/internal/service/batch/compute_environment_data_source.go b/internal/service/batch/compute_environment_data_source.go index 3c4fc60db39..b375b31f220 100644 --- a/internal/service/batch/compute_environment_data_source.go +++ b/internal/service/batch/compute_environment_data_source.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package batch import ( @@ -12,6 +15,7 @@ import ( tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" ) +// @SDKDataSource("aws_batch_compute_environment") func DataSourceComputeEnvironment() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceComputeEnvironmentRead, @@ -58,13 +62,29 @@ func DataSourceComputeEnvironment() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "update_policy": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "job_execution_timeout_minutes": { + Type: schema.TypeInt, + Computed: true, + }, + "terminate_jobs_on_update": { + Type: schema.TypeBool, + Computed: true, + }, + }, + }, + }, }, } } func dataSourceComputeEnvironmentRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).BatchConn() + conn := meta.(*conns.AWSClient).BatchConn(ctx) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig params := &batch.DescribeComputeEnvironmentsInput{ @@ -93,7 +113,11 @@ func dataSourceComputeEnvironmentRead(ctx context.Context, d *schema.ResourceDat d.Set("status_reason", computeEnvironment.StatusReason) d.Set("state", 
computeEnvironment.State) - if err := d.Set("tags", KeyValueTags(computeEnvironment.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { + if err := d.Set("update_policy", flattenComputeEnvironmentUpdatePolicy(computeEnvironment.UpdatePolicy)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting update_policy: %s", err) + } + + if err := d.Set("tags", KeyValueTags(ctx, computeEnvironment.Tags).IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) } diff --git a/internal/service/batch/compute_environment_data_source_test.go b/internal/service/batch/compute_environment_data_source_test.go index 7e95ac96b16..b660c5159af 100644 --- a/internal/service/batch/compute_environment_data_source_test.go +++ b/internal/service/batch/compute_environment_data_source_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + package batch_test import ( @@ -5,8 +8,8 @@ import ( "testing" "github.com/aws/aws-sdk-go/service/batch" - sdkacctest "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-provider-aws/internal/acctest" ) @@ -17,7 +20,7 @@ func TestAccBatchComputeEnvironmentDataSource_basic(t *testing.T) { datasourceName := "data.aws_batch_compute_environment.by_name" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t); testAccPreCheck(ctx, t) }, + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ @@ -31,6 +34,32 @@ func TestAccBatchComputeEnvironmentDataSource_basic(t *testing.T) { 
resource.TestCheckResourceAttrPair(datasourceName, "state", resourceName, "state"), resource.TestCheckResourceAttrPair(datasourceName, "tags.%", resourceName, "tags.%"), resource.TestCheckResourceAttrPair(datasourceName, "type", resourceName, "type"), + resource.TestCheckResourceAttr(datasourceName, "update_policy.#", "0"), + ), + }, + }, + }) +} + +func TestAccBatchComputeEnvironmentDataSource_basicUpdatePolicy(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix("tf_acc_test_") + resourceName := "aws_batch_compute_environment.test" + datasourceName := "data.aws_batch_compute_environment.by_name" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccComputeEnvironmentDataSourceConfig_updatePolicy(rName, 30, false), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrPair(datasourceName, "arn", resourceName, "arn"), + resource.TestCheckResourceAttr(datasourceName, "update_policy.#", "1"), + resource.TestCheckResourceAttr(datasourceName, "update_policy.0.%", "2"), + resource.TestCheckResourceAttr(datasourceName, "update_policy.0.terminate_jobs_on_update", "false"), + resource.TestCheckResourceAttr(datasourceName, "update_policy.0.job_execution_timeout_minutes", "30"), ), }, }, @@ -70,30 +99,6 @@ resource "aws_iam_instance_profile" "ecs_instance_role" { role = aws_iam_role.ecs_instance_role.name } -resource "aws_iam_role" "aws_batch_service_role" { - name = "batch_%[1]s" - - assume_role_policy = < Date: Fri, 10 Nov 2023 10:51:40 -0500 Subject: [PATCH 032/438] add changelog --- .changelog/CHANGEME.txt | 7 ------- 1 file changed, 7 deletions(-) delete mode 100644 .changelog/CHANGEME.txt diff --git a/.changelog/CHANGEME.txt b/.changelog/CHANGEME.txt deleted file mode 100644 index 
521c87eaf7a..00000000000 --- a/.changelog/CHANGEME.txt +++ /dev/null @@ -1,7 +0,0 @@ -```release-note:enhancement -resource/aws_batch_compute_environment: Add `update_policy` parameter -``` - -```release-note:enhancement -data-source/aws_batch_compute_environment: Add `update_policy` attribute -``` \ No newline at end of file From 38c7147182ef11c1a0bc26ad764309eb74d78f0d Mon Sep 17 00:00:00 2001 From: Drew Mullen Date: Fri, 10 Nov 2023 10:58:06 -0500 Subject: [PATCH 033/438] add changelog --- .changelog/34353.txt | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .changelog/34353.txt diff --git a/.changelog/34353.txt b/.changelog/34353.txt new file mode 100644 index 00000000000..521c87eaf7a --- /dev/null +++ b/.changelog/34353.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_batch_compute_environment: Add `update_policy` parameter +``` + +```release-note:enhancement +data-source/aws_batch_compute_environment: Add `update_policy` attribute +``` \ No newline at end of file From a2fa6bfc66d08365471a2d88ee9d81002a9869ab Mon Sep 17 00:00:00 2001 From: Drew Mullen Date: Fri, 10 Nov 2023 12:36:50 -0500 Subject: [PATCH 034/438] fix ci issues --- internal/service/batch/compute_environment.go | 14 ++--- .../compute_environment_data_source_test.go | 2 +- .../service/batch/compute_environment_test.go | 52 +++++-------------- .../d/batch_compute_environment.html.markdown | 2 +- .../r/batch_compute_environment.html.markdown | 12 ++--- 5 files changed, 29 insertions(+), 53 deletions(-) diff --git a/internal/service/batch/compute_environment.go b/internal/service/batch/compute_environment.go index 2af0598d449..9148b413931 100644 --- a/internal/service/batch/compute_environment.go +++ b/internal/service/batch/compute_environment.go @@ -329,7 +329,7 @@ func resourceComputeEnvironmentCreate(ctx context.Context, d *schema.ResourceDat return sdkdiag.AppendErrorf(diags, "Create Batch Compute Environment extra arguments through UpdateComputeEnvironment (%s): 
%s", d.Id(), err) } - if _, err := waitComputeEnvironmentUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + if err := waitComputeEnvironmentUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "Create waiting for Batch Compute Environment (%s) extra arguments through UpdateComputeEnvironment: %s", d.Id(), err) } } @@ -521,7 +521,7 @@ func resourceComputeEnvironmentUpdate(ctx context.Context, d *schema.ResourceDat return sdkdiag.AppendErrorf(diags, "updating Batch Compute Environment (%s): %s", d.Id(), err) } - if _, err := waitComputeEnvironmentUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + if err := waitComputeEnvironmentUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for Batch Compute Environment (%s) update: %s", d.Id(), err) } } @@ -796,7 +796,7 @@ func waitComputeEnvironmentDisabled(ctx context.Context, conn *batch.Batch, name return nil, err } -func waitComputeEnvironmentUpdated(ctx context.Context, conn *batch.Batch, name string, timeout time.Duration) (*batch.ComputeEnvironmentDetail, error) { +func waitComputeEnvironmentUpdated(ctx context.Context, conn *batch.Batch, name string, timeout time.Duration) error { stateConf := &retry.StateChangeConf{ Pending: []string{batch.CEStatusUpdating}, Target: []string{batch.CEStatusValid}, @@ -806,11 +806,11 @@ func waitComputeEnvironmentUpdated(ctx context.Context, conn *batch.Batch, name outputRaw, err := stateConf.WaitForStateContext(ctx) - if v, ok := outputRaw.(*batch.ComputeEnvironmentDetail); ok { - return v, err + if _, ok := outputRaw.(*batch.ComputeEnvironmentDetail); ok { + return err } - return nil, err + return err } func isFargateType(computeResourceType string) bool { @@ -1265,7 +1265,7 @@ func expandComputeEnvironmentUpdatePolicy(l []interface{}) *batch.UpdatePolicy { up := &batch.UpdatePolicy{ JobExecutionTimeoutMinutes: 
aws.Int64(int64(m["job_execution_timeout_minutes"].(int))), - TerminateJobsOnUpdate: aws.Bool(bool(m["terminate_jobs_on_update"].(bool))), + TerminateJobsOnUpdate: aws.Bool(m["terminate_jobs_on_update"].(bool)), } return up diff --git a/internal/service/batch/compute_environment_data_source_test.go b/internal/service/batch/compute_environment_data_source_test.go index b660c5159af..dc86e812a04 100644 --- a/internal/service/batch/compute_environment_data_source_test.go +++ b/internal/service/batch/compute_environment_data_source_test.go @@ -136,7 +136,7 @@ resource "aws_batch_compute_environment" "test" { type = "EC2" } - type = "MANAGED" + type = "MANAGED" } data "aws_batch_compute_environment" "by_name" { diff --git a/internal/service/batch/compute_environment_test.go b/internal/service/batch/compute_environment_test.go index 3fc7ad366bf..3e0fe56534f 100644 --- a/internal/service/batch/compute_environment_test.go +++ b/internal/service/batch/compute_environment_test.go @@ -428,7 +428,7 @@ func TestAccBatchComputeEnvironment_updatePolicyUpdate(t *testing.T) { CheckDestroy: testAccCheckComputeEnvironmentDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccComputeEnvironmentConfig_ec2UpdatePolicyUpdate(rName), + Config: testAccComputeEnvironmentConfig_ec2UpdatePolicyOmitted(rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckComputeEnvironmentExists(ctx, resourceName, &ce), acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "batch", fmt.Sprintf("compute-environment/%s", rName)), @@ -2488,11 +2488,11 @@ resource "aws_batch_compute_environment" "test" { compute_environment_name = %[1]q compute_resources { - allocation_strategy = "BEST_FIT_PROGRESSIVE" + allocation_strategy = "BEST_FIT_PROGRESSIVE" instance_role = aws_iam_instance_profile.ecs_instance.arn - instance_type = ["optimal"] + instance_type = ["optimal"] max_vcpus = 4 - min_vcpus = 0 + min_vcpus = 0 security_group_ids = [ aws_security_group.test.id ] @@ -2501,39 +2501,15 @@ 
resource "aws_batch_compute_environment" "test" { ] type = "EC2" } - update_policy { - job_execution_timeout_minutes = %[2]d - terminate_jobs_on_update = %[3]v - } - - type = "MANAGED" -} -`, rName, timeout, terminate)) -} -func testAccComputeEnvironmentConfig_ec2UpdatePolicyUpdate(rName string) string { - return acctest.ConfigCompose(testAccComputeEnvironmentConfig_baseDefaultSLR(rName), fmt.Sprintf(` -resource "aws_batch_compute_environment" "test" { - compute_environment_name = %[1]q - - compute_resources { - allocation_strategy = "BEST_FIT_PROGRESSIVE" - instance_role = aws_iam_instance_profile.ecs_instance.arn - instance_type = ["optimal"] - max_vcpus = 4 - min_vcpus = 0 - security_group_ids = [ - aws_security_group.test.id - ] - subnets = [ - aws_subnet.test.id - ] - type = "EC2" + update_policy { + job_execution_timeout_minutes = %[2]d + terminate_jobs_on_update = %[3]v } - type = "MANAGED" + type = "MANAGED" } -`, rName)) +`, rName, timeout, terminate)) } func testAccComputeEnvironmentConfig_ec2UpdatePolicyOmitted(rName string) string { @@ -2542,12 +2518,12 @@ resource "aws_batch_compute_environment" "test" { compute_environment_name = %[1]q compute_resources { - allocation_strategy = "BEST_FIT_PROGRESSIVE" + allocation_strategy = "BEST_FIT_PROGRESSIVE" instance_role = aws_iam_instance_profile.ecs_instance.arn - instance_type = ["optimal"] + instance_type = ["optimal"] max_vcpus = 4 - min_vcpus = 0 - security_group_ids = [ + min_vcpus = 0 + security_group_ids = [ aws_security_group.test.id ] subnets = [ @@ -2556,7 +2532,7 @@ resource "aws_batch_compute_environment" "test" { type = "EC2" } - type = "MANAGED" + type = "MANAGED" } `, rName)) } diff --git a/website/docs/d/batch_compute_environment.html.markdown b/website/docs/d/batch_compute_environment.html.markdown index 1cc6867197f..5d9a2321862 100644 --- a/website/docs/d/batch_compute_environment.html.markdown +++ b/website/docs/d/batch_compute_environment.html.markdown @@ -36,5 +36,5 @@ This data source 
exports the following attributes in addition to the arguments a * `status` - Current status of the compute environment (for example, `CREATING` or `VALID`). * `status_reason` - Short, human-readable string to provide additional details about the current status of the compute environment. * `state` - State of the compute environment (for example, `ENABLED` or `DISABLED`). If the state is `ENABLED`, then the compute environment accepts jobs from a queue and can scale out automatically based on queues. -* `update_policy` - (Optional) Specifies the infrastructure update policy for the compute environment. +* `update_policy` - (Optional) Specifies the infrastructure update policy for the compute environment. * `tags` - Key-value map of resource tags diff --git a/website/docs/r/batch_compute_environment.html.markdown b/website/docs/r/batch_compute_environment.html.markdown index c905e5c29d7..62fec75d28b 100644 --- a/website/docs/r/batch_compute_environment.html.markdown +++ b/website/docs/r/batch_compute_environment.html.markdown @@ -167,7 +167,7 @@ resource "aws_batch_compute_environment" "sample" { instance_type = ["optimal"] max_vcpus = 4 min_vcpus = 0 - security_group_ids = [ + security_group_ids = [ aws_security_group.sample.id ] subnets = [ @@ -176,12 +176,12 @@ resource "aws_batch_compute_environment" "sample" { type = "EC2" } - update_policy { - job_execution_timeout_minutes = 30 - terminate_jobs_on_update = false - } + update_policy { + job_execution_timeout_minutes = 30 + terminate_jobs_on_update = false + } - type = "MANAGED" + type = "MANAGED" } ``` From b8b643b39f2404bec09cf28bff9eb51cb01b3694 Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Sat, 11 Nov 2023 01:03:26 +0100 Subject: [PATCH 035/438] feat: remove readonly properties and updated tests --- internal/service/dms/replication_config.go | 44 +++++++++++++++++-- .../service/dms/replication_config_test.go | 1 + 2 files changed, 41 insertions(+), 4 deletions(-) diff --git 
a/internal/service/dms/replication_config.go b/internal/service/dms/replication_config.go index 0e28a878388..efa7bae2003 100644 --- a/internal/service/dms/replication_config.go +++ b/internal/service/dms/replication_config.go @@ -5,6 +5,7 @@ package dms import ( "context" + "encoding/json" "fmt" "log" "time" @@ -112,9 +113,11 @@ func ResourceReplicationConfig() *schema.Resource { ForceNew: true, }, "replication_settings": { - Type: schema.TypeString, - Optional: true, - Computed: true, + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringIsJSON, + DiffSuppressFunc: verify.SuppressEquivalentJSONDiffs, }, "replication_type": { Type: schema.TypeString, @@ -230,13 +233,19 @@ func resourceReplicationConfigRead(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "setting compute_config: %s", err) } d.Set("replication_config_identifier", replicationConfig.ReplicationConfigIdentifier) - d.Set("replication_settings", replicationConfig.ReplicationSettings) d.Set("replication_type", replicationConfig.ReplicationType) d.Set("source_endpoint_arn", replicationConfig.SourceEndpointArn) d.Set("supplemental_settings", replicationConfig.SupplementalSettings) d.Set("table_mappings", replicationConfig.TableMappings) d.Set("target_endpoint_arn", replicationConfig.TargetEndpointArn) + settings, err := replicationConfigRemoveReadOnlySettings(aws.StringValue(replicationConfig.ReplicationSettings)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading DMS Replication Config (%s): %s", d.Id(), err) + } + + d.Set("replication_settings", settings) + return diags } @@ -655,3 +664,30 @@ func expandComputeConfigInput(tfMap map[string]interface{}) *dms.ComputeConfig { return apiObject } + +func replicationConfigRemoveReadOnlySettings(settings string) (*string, error) { + var settingsData map[string]interface{} + if err := json.Unmarshal([]byte(settings), &settingsData); err != nil { + return nil, err + } + + 
controlTablesSettings, ok := settingsData["ControlTablesSettings"].(map[string]interface{}) + if ok { + delete(controlTablesSettings, "historyTimeslotInMinutes") + } + + logging, ok := settingsData["Logging"].(map[string]interface{}) + if ok { + delete(logging, "EnableLogContext") + delete(logging, "CloudWatchLogGroup") + delete(logging, "CloudWatchLogStream") + } + + cleanedSettings, err := json.Marshal(settingsData) + if err != nil { + return nil, err + } + + cleanedSettingsString := string(cleanedSettings) + return &cleanedSettingsString, nil +} diff --git a/internal/service/dms/replication_config_test.go b/internal/service/dms/replication_config_test.go index 3f009e15b7b..fa2a9dfe610 100644 --- a/internal/service/dms/replication_config_test.go +++ b/internal/service/dms/replication_config_test.go @@ -386,6 +386,7 @@ resource "aws_dms_replication_config" "test" { source_endpoint_arn = aws_dms_endpoint.source.endpoint_arn target_endpoint_arn = aws_dms_endpoint.target.endpoint_arn table_mappings = "{\"rules\":[{\"rule-type\":\"selection\",\"rule-id\":\"1\",\"rule-name\":\"1\",\"object-locator\":{\"schema-name\":\"%%\",\"table-name\":\"%%\"},\"rule-action\":\"include\"}]}" + replication_settings = 
"{\"BeforeImageSettings\":null,\"ChangeProcessingDdlHandlingPolicy\":{\"HandleSourceTableAltered\":true,\"HandleSourceTableDropped\":true,\"HandleSourceTableTruncated\":true},\"ChangeProcessingTuning\":{\"BatchApplyMemoryLimit\":500,\"BatchApplyPreserveTransaction\":true,\"BatchApplyTimeoutMax\":30,\"BatchApplyTimeoutMin\":1,\"BatchSplitSize\":0,\"CommitTimeout\":1,\"MemoryKeepTime\":60,\"MemoryLimitTotal\":1024,\"MinTransactionSize\":1000,\"StatementCacheSize\":50},\"CharacterSetSettings\":null,\"ControlTablesSettings\":{\"CommitPositionTableEnabled\":false,\"ControlSchema\":\"\",\"FullLoadExceptionTableEnabled\":false,\"HistoryTableEnabled\":false,\"HistoryTimeslotInMinutes\":5,\"StatusTableEnabled\":false,\"SuspendedTablesTableEnabled\":false},\"ErrorBehavior\":{\"ApplyErrorDeletePolicy\":\"IGNORE_RECORD\",\"ApplyErrorEscalationCount\":0,\"ApplyErrorEscalationPolicy\":\"LOG_ERROR\",\"ApplyErrorFailOnTruncationDdl\":false,\"ApplyErrorInsertPolicy\":\"LOG_ERROR\",\"ApplyErrorUpdatePolicy\":\"LOG_ERROR\",\"DataErrorEscalationCount\":0,\"DataErrorEscalationPolicy\":\"SUSPEND_TABLE\",\"DataErrorPolicy\":\"LOG_ERROR\",\"DataTruncationErrorPolicy\":\"LOG_ERROR\",\"EventErrorPolicy\":\"IGNORE\",\"FailOnNoTablesCaptured\":false,\"FailOnTransactionConsistencyBreached\":false,\"FullLoadIgnoreConflicts\":true,\"RecoverableErrorCount\":-1,\"RecoverableErrorInterval\":5,\"RecoverableErrorStopRetryAfterThrottlingMax\":false,\"RecoverableErrorThrottling\":true,\"RecoverableErrorThrottlingMax\":1800,\"TableErrorEscalationCount\":0,\"TableErrorEscalationPolicy\":\"STOP_TASK\",\"TableErrorPolicy\":\"SUSPEND_TABLE\"},\"FailTaskWhenCleanTaskResourceFailed\":false,\"FullLoadSettings\":{\"CommitRate\":10000,\"CreatePkAfterFullLoad\":false,\"MaxFullLoadSubTasks\":8,\"StopTaskCachedChangesApplied\":false,\"StopTaskCachedChangesNotApplied\":false,\"TargetTablePrepMode\":\"DROP_AND_CREATE\",\"TransactionConsistencyTimeout\":600},\"Logging\":{\"EnableLogging\":false,\"LogComponents\":[{\"Id
\":\"TRANSFORMATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_UNLOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"IO\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_LOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"PERFORMANCE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_CAPTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SORTER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"REST_SERVER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"VALIDATOR_EXT\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_APPLY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TASK_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TABLES_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"METADATA_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_FACTORY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMON\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"ADDONS\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"DATA_STRUCTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMUNICATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_TRANSFER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"}]},\"LoopbackPreventionSettings\":null,\"PostProcessingRules\":null,\"StreamBufferSettings\":{\"CtrlStreamBufferSizeInMB\":5,\"StreamBufferCount\":3,\"StreamBufferSizeInMB\":8},\"TTSettings\":{\"EnableTT\":false,\"FailTaskOnTTFailure\":false,\"TTRecordSettings\":null,\"TTS3Settings\":null},\"TargetMetadata\":{\"BatchApplyEnabled\":false,\"FullLobMode\":false,\"InlineLobMaxSize\":0,\"LimitedSizeLobMode\":true,\"LoadMaxFileSize\":0,\"LobChunkSize\":0,\"LobMaxSize\":32,\"ParallelApplyBufferSize\":0,\"ParallelApplyQueuesPerThread\":0,\"ParallelApplyThreads\":0,\"ParallelLoadBufferSize\":0,\"ParallelLoadQueuesPerThread\":0,\"ParallelLoadThreads\":0,\"SupportLobs\":true,\"TargetSchema\":\"\",\"TaskRecoveryTableEnabled\":false}}" compute_config 
{ replication_subnet_group_id = aws_dms_replication_subnet_group.test.replication_subnet_group_id From 942efeacf5f1caf2b3cbbc0b1ed010b345cbe748 Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Sat, 11 Nov 2023 01:31:07 +0100 Subject: [PATCH 036/438] chore: added changelog --- .changelog/34356.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/34356.txt diff --git a/.changelog/34356.txt b/.changelog/34356.txt new file mode 100644 index 00000000000..2d9f2866f79 --- /dev/null +++ b/.changelog/34356.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_dms_replication_config: Remove read-only properties from `replication_settings` to supress diffs +``` From b5b160a84d51c40da8460a0ccf2af06f28793dd4 Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Sat, 11 Nov 2023 01:32:46 +0100 Subject: [PATCH 037/438] chore: fmt changelog --- .changelog/34356.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changelog/34356.txt b/.changelog/34356.txt index 2d9f2866f79..1dd0e3aafe1 100644 --- a/.changelog/34356.txt +++ b/.changelog/34356.txt @@ -1,3 +1,3 @@ ```release-note:bug -resource/aws_dms_replication_config: Remove read-only properties from `replication_settings` to supress diffs +resource/aws_dms_replication_config: Remove read-only properties from `replication_settings` to suppress diffs ``` From bbebdfe36191d4d6e3cc7d4f894fa9628c5381a8 Mon Sep 17 00:00:00 2001 From: Drew Mullen Date: Sat, 11 Nov 2023 08:20:15 -0500 Subject: [PATCH 038/438] fix ci issues --- .../compute_environment_data_source_test.go | 18 +++++++++--------- .../service/batch/compute_environment_test.go | 4 ++-- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/internal/service/batch/compute_environment_data_source_test.go b/internal/service/batch/compute_environment_data_source_test.go index dc86e812a04..d72d4b805b2 100644 --- a/internal/service/batch/compute_environment_data_source_test.go +++ 
b/internal/service/batch/compute_environment_data_source_test.go @@ -151,12 +151,12 @@ resource "aws_batch_compute_environment" "test" { compute_environment_name = %[1]q compute_resources { - allocation_strategy = "BEST_FIT_PROGRESSIVE" + allocation_strategy = "BEST_FIT_PROGRESSIVE" instance_role = aws_iam_instance_profile.ecs_instance.arn - instance_type = ["optimal"] + instance_type = ["optimal"] max_vcpus = 4 - min_vcpus = 0 - security_group_ids = [ + min_vcpus = 0 + security_group_ids = [ aws_security_group.test.id ] subnets = [ @@ -164,12 +164,12 @@ resource "aws_batch_compute_environment" "test" { ] type = "EC2" } - update_policy { - job_execution_timeout_minutes = %[2]d - terminate_jobs_on_update = %[3]v - } + update_policy { + job_execution_timeout_minutes = %[2]d + terminate_jobs_on_update = %[3]t + } - type = "MANAGED" + type = "MANAGED" } data "aws_batch_compute_environment" "by_name" { diff --git a/internal/service/batch/compute_environment_test.go b/internal/service/batch/compute_environment_test.go index 3e0fe56534f..a470de67af3 100644 --- a/internal/service/batch/compute_environment_test.go +++ b/internal/service/batch/compute_environment_test.go @@ -2493,7 +2493,7 @@ resource "aws_batch_compute_environment" "test" { instance_type = ["optimal"] max_vcpus = 4 min_vcpus = 0 - security_group_ids = [ + security_group_ids = [ aws_security_group.test.id ] subnets = [ @@ -2504,7 +2504,7 @@ resource "aws_batch_compute_environment" "test" { update_policy { job_execution_timeout_minutes = %[2]d - terminate_jobs_on_update = %[3]v + terminate_jobs_on_update = %[3]t } type = "MANAGED" From 974ffdbc3ae6887f1efb1df795809e5e5501ff8a Mon Sep 17 00:00:00 2001 From: Andrea Quintino Date: Tue, 31 Oct 2023 08:50:12 +0100 Subject: [PATCH 039/438] wip - added postgre-sql-settings --- internal/service/dms/endpoint.go | 76 ++++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git a/internal/service/dms/endpoint.go b/internal/service/dms/endpoint.go index 
c79a0a76656..38da2b9addc 100644 --- a/internal/service/dms/endpoint.go +++ b/internal/service/dms/endpoint.go @@ -348,6 +348,82 @@ func ResourceEndpoint() *schema.Resource { Optional: true, ConflictsWith: []string{"secrets_manager_access_role_arn", "secrets_manager_arn"}, }, + "postgres_settings": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + DiffSuppressFunc: verify.SuppressMissingOptionalConfigurationBlock, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "after_connect_script": { + Type: schema.TypeString, + Optional: true, + }, + "capture_ddls": { + Type: schema.TypeBool, + Optional: true, + }, + "max_file_size": { + Type: schema.TypeInt, + Optional: true, + //Default: encryptionModeSseS3, + //ValidateFunc: validation.StringInSlice(encryptionMode_Values(), false), + }, + "ddl_artifacts_schema": { + Type: schema.TypeString, + Optional: true, + //DiffSuppressFunc: tfkms.DiffSuppressKey, + //ValidateFunc: tfkms.ValidateKey, + }, + "execute_timeout": { + Type: schema.TypeInt, + Optional: true, + // ValidateFunc: verify.ValidARN, + }, + "fail_tasks_on_lob_truncation": { + Type: schema.TypeBool, + Optional: true, + // ValidateFunc: verify.ValidARN, + }, + "heartbeat_enable": { + Type: schema.TypeBool, + Optional: true, + // ValidateFunc: verify.ValidARN, + }, + "heartbeat_schema": { + Type: schema.TypeString, + Optional: true, + // ValidateFunc: verify.ValidARN, + }, + "heartbeat_frequency": { + Type: schema.TypeInt, + Optional: true, + // ValidateFunc: verify.ValidARN, + }, + "slot_name": { + Type: schema.TypeString, + Optional: true, + // ValidateFunc: verify.ValidARN, + }, + "plugin_name": { // add validation function + Type: schema.TypeString, + Optional: true, + // ValidateFunc: verify.ValidARN, + }, + "trim_space_in_char": { + Type: schema.TypeBool, + Optional: true, + // ValidateFunc: verify.ValidARN, + }, + "map_boolean_as_boolean": { + Type: schema.TypeBool, + Optional: true, + // ValidateFunc: 
verify.ValidARN, + }, + }, + }, + }, "redis_settings": { Type: schema.TypeList, Optional: true, From 50ec6c3f7c6bc3b00dbd1caa01a5dacdbaff5e19 Mon Sep 17 00:00:00 2001 From: Andrea Quintino Date: Sat, 25 Nov 2023 15:32:50 +0100 Subject: [PATCH 040/438] wip - little changes --- internal/service/dms/endpoint.go | 28 ++++++++++++++++------------ 1 file changed, 16 insertions(+), 12 deletions(-) diff --git a/internal/service/dms/endpoint.go b/internal/service/dms/endpoint.go index 38da2b9addc..3523cc232d1 100644 --- a/internal/service/dms/endpoint.go +++ b/internal/service/dms/endpoint.go @@ -828,24 +828,22 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in expandTopLevelConnectionInfo(d, input) } case engineNameAuroraPostgresql, engineNamePostgres: + postgres_settings := expandPostgreSQLSettings() if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.PostgreSQLSettings = &dms.PostgreSQLSettings{ - SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), - SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), - DatabaseName: aws.String(d.Get("database_name").(string)), - } + postgres_settings.SecretsManagerAccessRoleArn = aws.String(d.Get("secrets_manager_access_role_arn").(string)) + postgres_settings.SecretsManagerSecretId = aws.String(d.Get("secrets_manager_arn").(string)) + postgres_settings.DatabaseName = aws.String(d.Get("database_name").(string)) } else { - input.PostgreSQLSettings = &dms.PostgreSQLSettings{ - Username: aws.String(d.Get("username").(string)), - Password: aws.String(d.Get("password").(string)), - ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get("port").(int))), - DatabaseName: aws.String(d.Get("database_name").(string)), - } + postgres_settings.Username = aws.String(d.Get("username").(string)) + postgres_settings.Password = aws.String(d.Get("password").(string)) + postgres_settings.ServerName = 
aws.String(d.Get("server_name").(string)) + postgres_settings.Port = aws.Int64(int64(d.Get("port").(int))) + postgres_settings.DatabaseName = aws.String(d.Get("database_name").(string)) // Set connection info in top-level namespace as well expandTopLevelConnectionInfo(d, input) } + input.PostgreSQLSettings = postgres_settings case engineNameDynamoDB: input.DynamoDbSettings = &dms.DynamoDbSettings{ ServiceAccessRoleArn: aws.String(d.Get("service_access_role").(string)), @@ -2140,6 +2138,12 @@ func flattenRedshiftSettings(settings *dms.RedshiftSettings) []map[string]interf return []map[string]interface{}{m} } +func expandPostgreSQLSettings() *dms.PostgreSQLSettings { + settings := &dms.PostgreSQLSettings{} + + return settings +} + func expandS3Settings(tfMap map[string]interface{}) *dms.S3Settings { if tfMap == nil { return nil From 5fd667c357a93b1c57607b5b76a72d0d6fbbea58 Mon Sep 17 00:00:00 2001 From: Andrea Quintino Date: Sat, 25 Nov 2023 17:24:46 +0100 Subject: [PATCH 041/438] added postgres_settings support --- internal/service/dms/endpoint.go | 48 ++++++++++++++++++++++++++++++-- 1 file changed, 46 insertions(+), 2 deletions(-) diff --git a/internal/service/dms/endpoint.go b/internal/service/dms/endpoint.go index 3523cc232d1..7aeeb341bc0 100644 --- a/internal/service/dms/endpoint.go +++ b/internal/service/dms/endpoint.go @@ -828,7 +828,11 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in expandTopLevelConnectionInfo(d, input) } case engineNameAuroraPostgresql, engineNamePostgres: - postgres_settings := expandPostgreSQLSettings() + postgres_settings := &dms.PostgreSQLSettings{} + if _, ok := d.GetOk("postgres_settings"); ok { + postgres_settings = expandPostgreSQLSettings(d.Get("postgres_settings").([]interface{})[0].(map[string]interface{})) + } + if _, ok := d.GetOk("secrets_manager_arn"); ok { postgres_settings.SecretsManagerAccessRoleArn = aws.String(d.Get("secrets_manager_access_role_arn").(string)) 
postgres_settings.SecretsManagerSecretId = aws.String(d.Get("secrets_manager_arn").(string)) @@ -2138,9 +2142,49 @@ func flattenRedshiftSettings(settings *dms.RedshiftSettings) []map[string]interf return []map[string]interface{}{m} } -func expandPostgreSQLSettings() *dms.PostgreSQLSettings { +func expandPostgreSQLSettings(tfMap map[string]interface{}) *dms.PostgreSQLSettings { settings := &dms.PostgreSQLSettings{} + if v, ok := tfMap["after_connect_script"].(string); ok && v != "" { + settings.AfterConnectScript = aws.String(v) + } + if v, ok := tfMap["capture_ddls"].(bool); ok { + settings.CaptureDdls = aws.Bool(v) + } + if v, ok := tfMap["max_file_size"].(int); ok { + settings.MaxFileSize = aws.Int64(int64(v)) + } + if v, ok := tfMap["ddl_artifacts_schema"].(string); ok && v != "" { + settings.DdlArtifactsSchema = aws.String(v) + } + if v, ok := tfMap["execute_timeout"].(int); ok { + settings.ExecuteTimeout = aws.Int64(int64(v)) + } + if v, ok := tfMap["fail_tasks_on_lob_truncation"].(bool); ok { + settings.FailTasksOnLobTruncation = aws.Bool(v) + } + if v, ok := tfMap["heartbeat_enable"].(bool); ok { + settings.HeartbeatEnable = aws.Bool(v) + } + if v, ok := tfMap["heartbeat_schema"].(string); ok && v != "" { + settings.HeartbeatSchema = aws.String(v) + } + if v, ok := tfMap["heartbeat_frequency"].(int); ok { + settings.HeartbeatFrequency = aws.Int64(int64(v)) + } + if v, ok := tfMap["slot_name"].(string); ok && v != "" { + settings.SlotName = aws.String(v) + } + if v, ok := tfMap["plugin_name"].(string); ok && v != "" { + settings.PluginName = aws.String(v) + } + if v, ok := tfMap["trim_space_in_char"].(bool); ok { + settings.TrimSpaceInChar = aws.Bool(v) + } + if v, ok := tfMap["map_boolean_as_boolean"].(bool); ok { + settings.MapBooleanAsBoolean = aws.Bool(v) + } + return settings } From 4be6318c9e5e6cad9aae1c18d44ec5c5bc42f203 Mon Sep 17 00:00:00 2001 From: Andrea Quintino Date: Mon, 27 Nov 2023 16:42:28 +0100 Subject: [PATCH 042/438] test passed. 
waiting missing ones --- internal/service/dms/endpoint.go | 16 +-------- internal/service/dms/endpoint_test.go | 49 +++++++++++++++++++++++++++ 2 files changed, 50 insertions(+), 15 deletions(-) diff --git a/internal/service/dms/endpoint.go b/internal/service/dms/endpoint.go index 7aeeb341bc0..1f62af64ad3 100644 --- a/internal/service/dms/endpoint.go +++ b/internal/service/dms/endpoint.go @@ -351,7 +351,6 @@ func ResourceEndpoint() *schema.Resource { "postgres_settings": { Type: schema.TypeList, Optional: true, - Computed: true, MaxItems: 1, DiffSuppressFunc: verify.SuppressMissingOptionalConfigurationBlock, Elem: &schema.Resource{ @@ -367,59 +366,46 @@ func ResourceEndpoint() *schema.Resource { "max_file_size": { Type: schema.TypeInt, Optional: true, - //Default: encryptionModeSseS3, - //ValidateFunc: validation.StringInSlice(encryptionMode_Values(), false), }, "ddl_artifacts_schema": { Type: schema.TypeString, Optional: true, - //DiffSuppressFunc: tfkms.DiffSuppressKey, - //ValidateFunc: tfkms.ValidateKey, }, "execute_timeout": { Type: schema.TypeInt, Optional: true, - // ValidateFunc: verify.ValidARN, }, "fail_tasks_on_lob_truncation": { Type: schema.TypeBool, Optional: true, - // ValidateFunc: verify.ValidARN, }, "heartbeat_enable": { Type: schema.TypeBool, Optional: true, - // ValidateFunc: verify.ValidARN, }, "heartbeat_schema": { Type: schema.TypeString, Optional: true, - // ValidateFunc: verify.ValidARN, }, "heartbeat_frequency": { Type: schema.TypeInt, Optional: true, - // ValidateFunc: verify.ValidARN, }, "slot_name": { Type: schema.TypeString, Optional: true, - // ValidateFunc: verify.ValidARN, }, - "plugin_name": { // add validation function + "plugin_name": { Type: schema.TypeString, Optional: true, - // ValidateFunc: verify.ValidARN, }, "trim_space_in_char": { Type: schema.TypeBool, Optional: true, - // ValidateFunc: verify.ValidARN, }, "map_boolean_as_boolean": { Type: schema.TypeBool, Optional: true, - // ValidateFunc: verify.ValidARN, }, }, }, diff 
--git a/internal/service/dms/endpoint_test.go b/internal/service/dms/endpoint_test.go index 2401e7f01c4..a37415ef14a 100644 --- a/internal/service/dms/endpoint_test.go +++ b/internal/service/dms/endpoint_test.go @@ -1349,6 +1349,32 @@ func TestAccDMSEndpoint_PostgreSQL_kmsKey(t *testing.T) { }) } +func TestAccDMSEndpoint_PostgreSQL_settings(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, dms.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckEndpointDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccEndpointConfig_postgreSQLSettings(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEndpointExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.after_connect_script", "SET search_path TO pg_catalog,public;"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.capture_ddls", "true"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.max_file_size", "1024"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.execute_timeout", "100"), + ), + }, + }, + }) +} + func TestAccDMSEndpoint_SQLServer_basic(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_dms_endpoint.test" @@ -3800,6 +3826,29 @@ resource "aws_dms_endpoint" "test" { `, rName) } +func testAccEndpointConfig_postgreSQLSettings(rName string) string { + return fmt.Sprintf(` +resource "aws_dms_endpoint" "test" { + endpoint_id = %[1]q + endpoint_type = "source" + engine_name = "postgres" + server_name = "tftest" + port = 27017 + username = "tftest" + password = "tftest" + database_name = "tftest" + ssl_mode = "require" + 
extra_connection_attributes = "" + postgres_settings { + after_connect_script = "SET search_path TO pg_catalog,public;" + capture_ddls = true + max_file_size = 1024 + execute_timeout = 100 + } +} +`, rName) +} + func testAccEndpointConfig_sqlServer(rName string) string { return fmt.Sprintf(` resource "aws_dms_endpoint" "test" { From 1ccf9d9249a149424acdc889559f2e692f938273 Mon Sep 17 00:00:00 2001 From: Andrea Quintino Date: Sat, 2 Dec 2023 09:59:02 +0100 Subject: [PATCH 043/438] update tests --- internal/service/dms/endpoint_test.go | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) diff --git a/internal/service/dms/endpoint_test.go b/internal/service/dms/endpoint_test.go index a37415ef14a..1372464ec55 100644 --- a/internal/service/dms/endpoint_test.go +++ b/internal/service/dms/endpoint_test.go @@ -1368,7 +1368,15 @@ func TestAccDMSEndpoint_PostgreSQL_settings(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.after_connect_script", "SET search_path TO pg_catalog,public;"), resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.capture_ddls", "true"), resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.max_file_size", "1024"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.ddl_artifacts_schema", "true"), resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.execute_timeout", "100"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.fail_tasks_on_lob_truncation", "false"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.heartbeat_enable", "true"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.heartbeat_schema", "test"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.slot_name", "test"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.plugin_name", "pglogical"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.trim_space_in_char", 
"true"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.map_boolean_as_boolean", "true"), ), }, }, @@ -3841,9 +3849,17 @@ resource "aws_dms_endpoint" "test" { extra_connection_attributes = "" postgres_settings { after_connect_script = "SET search_path TO pg_catalog,public;" - capture_ddls = true - max_file_size = 1024 - execute_timeout = 100 + capture_ddls = true + max_file_size = 1024 + execute_timeout = 100 + ddl_artifacts_schema = true + fail_tasks_on_lob_truncation = false + heartbeat_enable = true + heartbeat_schema = "test" + slot_name = "test" + plugin_name = "pglogical" + trim_space_in_char = true + map_boolean_as_boolean = true } } `, rName) From 40e9d3b067c6f46f1e73232ab2652c0a623aca74 Mon Sep 17 00:00:00 2001 From: Andrea Quintino Date: Mon, 4 Dec 2023 18:53:33 +0100 Subject: [PATCH 044/438] should be ok --- internal/service/dms/endpoint.go | 65 ++++++++++++------ internal/service/dms/endpoint_test.go | 81 +++++++++++++++++++---- website/docs/r/dms_endpoint.html.markdown | 21 ++++++ 3 files changed, 132 insertions(+), 35 deletions(-) diff --git a/internal/service/dms/endpoint.go b/internal/service/dms/endpoint.go index 1f62af64ad3..47d38d9f560 100644 --- a/internal/service/dms/endpoint.go +++ b/internal/service/dms/endpoint.go @@ -359,12 +359,16 @@ func ResourceEndpoint() *schema.Resource { Type: schema.TypeString, Optional: true, }, + "babelfish_database_name": { + Type: schema.TypeString, + Optional: true, + }, "capture_ddls": { Type: schema.TypeBool, Optional: true, }, - "max_file_size": { - Type: schema.TypeInt, + "database_mode": { + Type: schema.TypeString, Optional: true, }, "ddl_artifacts_schema": { @@ -383,28 +387,36 @@ func ResourceEndpoint() *schema.Resource { Type: schema.TypeBool, Optional: true, }, + "heartbeat_frequency": { + Type: schema.TypeInt, + Optional: true, + }, "heartbeat_schema": { Type: schema.TypeString, Optional: true, }, - "heartbeat_frequency": { - Type: schema.TypeInt, + "map_boolean_as_boolean": { + 
Type: schema.TypeBool, Optional: true, }, - "slot_name": { - Type: schema.TypeString, + "map_jsonb_as_clob": { + Type: schema.TypeBool, Optional: true, }, - "plugin_name": { + "map_long_varchar_as": { Type: schema.TypeString, Optional: true, }, - "trim_space_in_char": { - Type: schema.TypeBool, + "max_file_size": { + Type: schema.TypeInt, Optional: true, }, - "map_boolean_as_boolean": { - Type: schema.TypeBool, + "plugin_name": { + Type: schema.TypeString, + Optional: true, + }, + "slot_name": { + Type: schema.TypeString, Optional: true, }, }, @@ -2134,11 +2146,14 @@ func expandPostgreSQLSettings(tfMap map[string]interface{}) *dms.PostgreSQLSetti if v, ok := tfMap["after_connect_script"].(string); ok && v != "" { settings.AfterConnectScript = aws.String(v) } + if v, ok := tfMap["babelfish_database_name"].(string); ok && v != "" { + settings.BabelfishDatabaseName = aws.String(v) + } if v, ok := tfMap["capture_ddls"].(bool); ok { settings.CaptureDdls = aws.Bool(v) } - if v, ok := tfMap["max_file_size"].(int); ok { - settings.MaxFileSize = aws.Int64(int64(v)) + if v, ok := tfMap["database_mode"].(string); ok && v != "" { + settings.DatabaseMode = aws.String(v) } if v, ok := tfMap["ddl_artifacts_schema"].(string); ok && v != "" { settings.DdlArtifactsSchema = aws.String(v) @@ -2152,23 +2167,29 @@ func expandPostgreSQLSettings(tfMap map[string]interface{}) *dms.PostgreSQLSetti if v, ok := tfMap["heartbeat_enable"].(bool); ok { settings.HeartbeatEnable = aws.Bool(v) } + if v, ok := tfMap["heartbeat_frequency"].(int); ok { + settings.HeartbeatFrequency = aws.Int64(int64(v)) + } if v, ok := tfMap["heartbeat_schema"].(string); ok && v != "" { settings.HeartbeatSchema = aws.String(v) } - if v, ok := tfMap["heartbeat_frequency"].(int); ok { - settings.HeartbeatFrequency = aws.Int64(int64(v)) + if v, ok := tfMap["map_boolean_as_boolean"].(bool); ok { + settings.MapBooleanAsBoolean = aws.Bool(v) } - if v, ok := tfMap["slot_name"].(string); ok && v != "" { - settings.SlotName = 
aws.String(v) + if v, ok := tfMap["map_jsonb_as_clob"].(bool); ok { + settings.MapJsonbAsClob = aws.Bool(v) + } + if v, ok := tfMap["map_long_varchar_as"].(string); ok && v != "" { + settings.MapLongVarcharAs = aws.String(v) + } + if v, ok := tfMap["max_file_size"].(int); ok { + settings.MaxFileSize = aws.Int64(int64(v)) } if v, ok := tfMap["plugin_name"].(string); ok && v != "" { settings.PluginName = aws.String(v) } - if v, ok := tfMap["trim_space_in_char"].(bool); ok { - settings.TrimSpaceInChar = aws.Bool(v) - } - if v, ok := tfMap["map_boolean_as_boolean"].(bool); ok { - settings.MapBooleanAsBoolean = aws.Bool(v) + if v, ok := tfMap["slot_name"].(string); ok && v != "" { + settings.SlotName = aws.String(v) } return settings diff --git a/internal/service/dms/endpoint_test.go b/internal/service/dms/endpoint_test.go index 1372464ec55..9b223b91f93 100644 --- a/internal/service/dms/endpoint_test.go +++ b/internal/service/dms/endpoint_test.go @@ -1349,7 +1349,7 @@ func TestAccDMSEndpoint_PostgreSQL_kmsKey(t *testing.T) { }) } -func TestAccDMSEndpoint_PostgreSQL_settings(t *testing.T) { +func TestAccDMSEndpoint_PostgreSQL_settings_source(t *testing.T) { ctx := acctest.Context(t) resourceName := "aws_dms_endpoint.test" rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1361,22 +1361,51 @@ func TestAccDMSEndpoint_PostgreSQL_settings(t *testing.T) { CheckDestroy: testAccCheckEndpointDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccEndpointConfig_postgreSQLSettings(rName), + Config: testAccEndpointConfig_postgreSQLSourceSettings(rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckEndpointExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "postgres_settings.#", "1"), resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.after_connect_script", "SET search_path TO pg_catalog,public;"), resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.capture_ddls", "true"), - 
resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.max_file_size", "1024"), resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.ddl_artifacts_schema", "true"), resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.execute_timeout", "100"), resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.fail_tasks_on_lob_truncation", "false"), resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.heartbeat_enable", "true"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.heartbeat_frequency", "5"), resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.heartbeat_schema", "test"), - resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.slot_name", "test"), - resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.plugin_name", "pglogical"), - resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.trim_space_in_char", "true"), resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.map_boolean_as_boolean", "true"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.map_jsonb_as_clob", "true"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.map_long_varchar_as", "wstring"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.max_file_size", "1024"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.plugin_name", "pglogical"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.slot_name", "test"), + ), + }, + }, + }) +} + +func TestAccDMSEndpoint_PostgreSQL_settings_target(t *testing.T) { + ctx := acctest.Context(t) + resourceName := "aws_dms_endpoint.test" + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, dms.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: 
testAccCheckEndpointDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccEndpointConfig_postgreSQLTargetSettings(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckEndpointExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.#", "1"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.after_connect_script", "SET search_path TO pg_catalog,public;"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.babelfish_database_name", "babelfish"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.database_mode", "babelfish"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.execute_timeout", "100"), + resource.TestCheckResourceAttr(resourceName, "postgres_settings.0.max_file_size", "1024"), ), }, }, @@ -3834,14 +3863,14 @@ resource "aws_dms_endpoint" "test" { `, rName) } -func testAccEndpointConfig_postgreSQLSettings(rName string) string { +func testAccEndpointConfig_postgreSQLSourceSettings(rName string) string { return fmt.Sprintf(` resource "aws_dms_endpoint" "test" { endpoint_id = %[1]q endpoint_type = "source" engine_name = "postgres" server_name = "tftest" - port = 27017 + port = 5432 username = "tftest" password = "tftest" database_name = "tftest" @@ -3850,16 +3879,42 @@ resource "aws_dms_endpoint" "test" { postgres_settings { after_connect_script = "SET search_path TO pg_catalog,public;" capture_ddls = true - max_file_size = 1024 - execute_timeout = 100 ddl_artifacts_schema = true + execute_timeout = 100 fail_tasks_on_lob_truncation = false heartbeat_enable = true + heartbeat_frequency = 5 heartbeat_schema = "test" - slot_name = "test" - plugin_name = "pglogical" - trim_space_in_char = true map_boolean_as_boolean = true + map_jsonb_as_clob = true + map_long_varchar_as = "wstring" + max_file_size = 1024 + plugin_name = "pglogical" + slot_name = "test" + } +} +`, rName) +} + +func 
testAccEndpointConfig_postgreSQLTargetSettings(rName string) string { + return fmt.Sprintf(` +resource "aws_dms_endpoint" "test" { + endpoint_id = %[1]q + endpoint_type = "target" + engine_name = "postgres" + server_name = "tftest" + port = 5432 + username = "tftest" + password = "tftest" + database_name = "tftest" + ssl_mode = "require" + extra_connection_attributes = "" + postgres_settings { + after_connect_script = "SET search_path TO pg_catalog,public;" + babelfish_database_name = "babelfish" + database_mode = "babelfish" + execute_timeout = 100 + max_file_size = 1024 } } `, rName) diff --git a/website/docs/r/dms_endpoint.html.markdown b/website/docs/r/dms_endpoint.html.markdown index 77903a417a4..898ea5c391b 100644 --- a/website/docs/r/dms_endpoint.html.markdown +++ b/website/docs/r/dms_endpoint.html.markdown @@ -58,6 +58,7 @@ The following arguments are optional: * `kinesis_settings` - (Optional) Configuration block for Kinesis settings. See below. * `mongodb_settings` - (Optional) Configuration block for MongoDB settings. See below. * `password` - (Optional) Password to be used to login to the endpoint database. +* `postgres_settings` - (Optional) Configuration block for Postgres settings. See below. * `pause_replication_tasks` - (Optional) Whether to pause associated running replication tasks, regardless if they are managed by Terraform, prior to modifying the endpoint. Only tasks paused by the resource will be restarted after the modification completes. Default is `false`. * `port` - (Optional) Port used by the endpoint database. * `redshift_settings` - (Optional) Configuration block for Redshift settings. See below. @@ -127,6 +128,26 @@ The following arguments are optional: * `extract_doc_id` - (Optional) Document ID. Use this setting when `nesting_level` is set to `none`. Default is `false`. * `nesting_level` - (Optional) Specifies either document or table mode. Default is `none`. Valid values are `one` (table mode) and `none` (document mode). 
+### postgres_settings +-> Additional information can be found in the [Using PostgreSQL as a Source for AWS DMS documentation](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.html). + +* `after_connect_script` - (Optional) For use with change data capture (CDC) only, this attribute has AWS DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data. +* `babelfish_database_name` - (Optional) The Babelfish for Aurora PostgreSQL database name for the endpoint. +* `capture_ddls` - (Optional) To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts. +* `database_mode` - (Optional) Specifies the default behavior of the replication's handling of PostgreSQL-compatible endpoints that require some additional configuration, such as Babelfish endpoints. +* `ddl_artifacts_schema` - (Optional) Sets the schema in which the operational DDL database artifacts are created. Default is `public`. +* `execute_timeout` - (Optional) Sets the client statement timeout for the PostgreSQL instance, in seconds. Default value is `60`. +* `fail_tasks_on_lob_truncation` - (Optional) When set to `true`, this value causes a task to fail if the actual size of a LOB column is greater than the specified `LobMaxSize`. Default is `false`. +* `heartbeat_enable` - (Optional) The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source. +* `heartbeat_frequency` - (Optional) Sets the WAL heartbeat frequency (in minutes). Default value is `5`. +* `heartbeat_schema` - (Optional) Sets the schema in which the heartbeat artifacts are created. Default value is `public`. +* `map_boolean_as_boolean` - (Optional) You can use PostgreSQL endpoint settings to map a boolean as a boolean from your PostgreSQL source to an Amazon Redshift target. 
Default value is `false`. +* `map_jsonb_as_clob` - (Optional) When `true`, DMS migrates JSONB values as CLOB. +* `map_long_varchar_as` - (Optional) Specifies how to map LONG varchar datatypes. Valid values: `wstring`, `clob`, `nclob`. +* `max_file_size` - (Optional) Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL. Default is `32,768 KB`. +* `plugin_name` - (Optional) Specifies the plugin to use to create a replication slot. Valid values: `pglogical`, `test_decoding`. +* `slot_name` - (Optional) Sets the name of a previously created logical replication slot for a CDC load of the PostgreSQL source instance. + ### redis_settings -> Additional information can be found in the [Using Redis as a target for AWS Database Migration Service](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Redis.html). From fc621f9455770fa8a7141674e4d943845131dfe9 Mon Sep 17 00:00:00 2001 From: Patrick Rice Date: Mon, 4 Dec 2023 19:44:07 +0000 Subject: [PATCH 045/438] Add support for defining `serde` --- .changelog/34251.txt | 3 ++ internal/service/glue/classifier.go | 27 +++++++++++ internal/service/glue/classifier_test.go | 60 ++++++++++++++++++++++++ 3 files changed, 90 insertions(+) create mode 100644 .changelog/34251.txt diff --git a/.changelog/34251.txt b/.changelog/34251.txt new file mode 100644 index 00000000000..0bd5cd43578 --- /dev/null +++ b/.changelog/34251.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_glue_classifier: Add support for defining `serde` when using `csv_classifier` +``` diff --git a/internal/service/glue/classifier.go b/internal/service/glue/classifier.go index 0c501929954..eb88ad45409 100644 --- a/internal/service/glue/classifier.go +++ b/internal/service/glue/classifier.go @@ -121,6 +121,18 @@ func ResourceClassifier() *schema.Resource { Type: schema.TypeString, Optional: true, }, + "serde": { + Type: schema.TypeString, + Optional: true, + // Computed is required because if nothing is set, the API + // will return "" which will be translated 
to "None" + Computed: true, + ValidateFunc: validation.StringInSlice([]string{ + "OpenCSVSerDe", + "LazySimpleSerDe", + "None", + }, false), + }, }, }, }, @@ -352,6 +364,10 @@ func expandCSVClassifierCreate(name string, m map[string]interface{}) *glue.Crea csvClassifier.CustomDatatypes = flex.ExpandStringList(v) } + if v, ok := m["serde"].(string); ok && v != "" { + csvClassifier.Serde = aws.String(v) + } + return csvClassifier } @@ -379,6 +395,10 @@ func expandCSVClassifierUpdate(name string, m map[string]interface{}) *glue.Upda csvClassifier.CustomDatatypes = flex.ExpandStringList(v) } + if v, ok := m["serde"].(string); ok && v != "" { + csvClassifier.Serde = aws.String(v) + } + return csvClassifier } @@ -466,6 +486,13 @@ func flattenCSVClassifier(csvClassifier *glue.CsvClassifier) []map[string]interf "quote_symbol": aws.StringValue(csvClassifier.QuoteSymbol), "custom_datatype_configured": aws.BoolValue(csvClassifier.CustomDatatypeConfigured), "custom_datatypes": aws.StringValueSlice(csvClassifier.CustomDatatypes), + "serde": aws.StringValue(csvClassifier.Serde), + } + + // When setting the value of `serde` to "None", it comes back as "" within the API + // This needs to be translated from "" or the validation will fail. 
+ if m["serde"].(string) == "" { + m["serde"] = "None" } return []map[string]interface{}{m} diff --git a/internal/service/glue/classifier_test.go b/internal/service/glue/classifier_test.go index e016a079f2e..1e2d9a4b8f5 100644 --- a/internal/service/glue/classifier_test.go +++ b/internal/service/glue/classifier_test.go @@ -74,6 +74,49 @@ func TestAccGlueClassifier_csvClassifier(t *testing.T) { }) } +// Test to ensure the Serde value is set properly in a CsvClassifier block +func TestAccGlueClassifier_csvClassifierCustomSerde(t *testing.T) { + ctx := acctest.Context(t) + var classifier glue.Classifier + + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_glue_classifier.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, glue.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckClassifierDestroy(ctx), + Steps: []resource.TestStep{ + // Set the serde to the default value (None) + { + Config: testAccClassifierConfig_csvWithSerde(rName, false, "PRESENT", "|", false, "None"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClassifierExists(ctx, resourceName, &classifier), + resource.TestCheckResourceAttr(resourceName, "csv_classifier.#", "1"), + resource.TestCheckResourceAttr(resourceName, "csv_classifier.0.serde", "None"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + ), + }, + // Update the serde to a non-default value (OpenCSVSerDe) + { + Config: testAccClassifierConfig_csvWithSerde(rName, false, "PRESENT", ",", false, "OpenCSVSerDe"), + Check: resource.ComposeTestCheckFunc( + testAccCheckClassifierExists(ctx, resourceName, &classifier), + resource.TestCheckResourceAttr(resourceName, "csv_classifier.#", "1"), + resource.TestCheckResourceAttr(resourceName, "csv_classifier.0.serde", "OpenCSVSerDe"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + ), + }, + { 
+ ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccGlueClassifier_CSVClassifier_quoteSymbol(t *testing.T) { ctx := acctest.Context(t) var classifier glue.Classifier @@ -509,6 +552,23 @@ resource "aws_glue_classifier" "test" { `, rName, allowSingleColumn, containsHeader, delimiter, disableValueTrimming) } +func testAccClassifierConfig_csvWithSerde(rName string, allowSingleColumn bool, containsHeader string, delimiter string, disableValueTrimming bool, serde string) string { + return fmt.Sprintf(` +resource "aws_glue_classifier" "test" { + name = "%s" + + csv_classifier { + allow_single_column = "%t" + contains_header = "%s" + delimiter = "%s" + disable_value_trimming = "%t" + serde = "%s" + header = ["header_column1", "header_column2"] + } +} +`, rName, allowSingleColumn, containsHeader, delimiter, disableValueTrimming, serde) +} + func testAccClassifierConfig_csvQuoteSymbol(rName, symbol string) string { return fmt.Sprintf(` resource "aws_glue_classifier" "test" { From 89a365706a907eebef72301e23e8a89bb5fe20c9 Mon Sep 17 00:00:00 2001 From: Andrea Quintino Date: Mon, 4 Dec 2023 21:55:20 +0100 Subject: [PATCH 046/438] fix markdown and terraform test code --- internal/service/dms/endpoint_test.go | 4 ++-- website/docs/r/dms_endpoint.html.markdown | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/internal/service/dms/endpoint_test.go b/internal/service/dms/endpoint_test.go index 9b223b91f93..7fee7fbc6fd 100644 --- a/internal/service/dms/endpoint_test.go +++ b/internal/service/dms/endpoint_test.go @@ -3877,7 +3877,7 @@ resource "aws_dms_endpoint" "test" { ssl_mode = "require" extra_connection_attributes = "" postgres_settings { - after_connect_script = "SET search_path TO pg_catalog,public;" + after_connect_script = "SET search_path TO pg_catalog,public;" capture_ddls = true ddl_artifacts_schema = true execute_timeout = 100 @@ -3910,7 +3910,7 @@ resource "aws_dms_endpoint" "test" { ssl_mode = 
"require" extra_connection_attributes = "" postgres_settings { - after_connect_script = "SET search_path TO pg_catalog,public;" + after_connect_script = "SET search_path TO pg_catalog,public;" babelfish_database_name = "babelfish" database_mode = "babelfish" execute_timeout = 100 diff --git a/website/docs/r/dms_endpoint.html.markdown b/website/docs/r/dms_endpoint.html.markdown index 898ea5c391b..191b0af2070 100644 --- a/website/docs/r/dms_endpoint.html.markdown +++ b/website/docs/r/dms_endpoint.html.markdown @@ -129,6 +129,7 @@ The following arguments are optional: * `nesting_level` - (Optional) Specifies either document or table mode. Default is `none`. Valid values are `one` (table mode) and `none` (document mode). ### postgres_settings + -> Additional information can be found in the [Using PostgreSQL as a Source for AWS DMS documentation](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.PostgreSQL.html). * `after_connect_script` - (Optional) For use with change data capture (CDC) only, this attribute has AWS DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data. 
From f50d54d33a226b6ec577123d22965189b093570d Mon Sep 17 00:00:00 2001 From: Andrea Quintino Date: Tue, 5 Dec 2023 09:30:28 +0100 Subject: [PATCH 047/438] fixed terraform test code --- internal/service/dms/endpoint_test.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/service/dms/endpoint_test.go b/internal/service/dms/endpoint_test.go index 7fee7fbc6fd..7d479b933a9 100644 --- a/internal/service/dms/endpoint_test.go +++ b/internal/service/dms/endpoint_test.go @@ -3910,11 +3910,11 @@ resource "aws_dms_endpoint" "test" { ssl_mode = "require" extra_connection_attributes = "" postgres_settings { - after_connect_script = "SET search_path TO pg_catalog,public;" - babelfish_database_name = "babelfish" - database_mode = "babelfish" - execute_timeout = 100 - max_file_size = 1024 + after_connect_script = "SET search_path TO pg_catalog,public;" + babelfish_database_name = "babelfish" + database_mode = "babelfish" + execute_timeout = 100 + max_file_size = 1024 } } `, rName) From 5b3aa6dda1a42623f84263d3041aa09cf9db1149 Mon Sep 17 00:00:00 2001 From: Andrea Quintino Date: Tue, 5 Dec 2023 17:21:16 +0100 Subject: [PATCH 048/438] fixed terraform test code --- internal/service/dms/endpoint_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/dms/endpoint_test.go b/internal/service/dms/endpoint_test.go index 7d479b933a9..aa8d130a80b 100644 --- a/internal/service/dms/endpoint_test.go +++ b/internal/service/dms/endpoint_test.go @@ -3876,7 +3876,7 @@ resource "aws_dms_endpoint" "test" { database_name = "tftest" ssl_mode = "require" extra_connection_attributes = "" - postgres_settings { + postgres_settings { after_connect_script = "SET search_path TO pg_catalog,public;" capture_ddls = true ddl_artifacts_schema = true @@ -3909,7 +3909,7 @@ resource "aws_dms_endpoint" "test" { database_name = "tftest" ssl_mode = "require" extra_connection_attributes = "" - postgres_settings { + postgres_settings { 
after_connect_script = "SET search_path TO pg_catalog,public;" babelfish_database_name = "babelfish" database_mode = "babelfish" From 0a0669d2f060696d5ae21597da875675ca33bd44 Mon Sep 17 00:00:00 2001 From: Noah Sparks Date: Wed, 6 Dec 2023 14:10:51 -0500 Subject: [PATCH 049/438] remove maxitems limit on appmesh_virtual_node backends --- internal/service/appmesh/virtual_node.go | 1 - 1 file changed, 1 deletion(-) diff --git a/internal/service/appmesh/virtual_node.go b/internal/service/appmesh/virtual_node.go index dfaeaa7ff70..c6efc3929b7 100644 --- a/internal/service/appmesh/virtual_node.go +++ b/internal/service/appmesh/virtual_node.go @@ -266,7 +266,6 @@ func resourceVirtualNodeSpecSchema() *schema.Schema { Type: schema.TypeSet, Optional: true, MinItems: 0, - MaxItems: 50, Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "virtual_service": { From 9c5ca8d4cf62f18677878c8949b5b4cb00ca1312 Mon Sep 17 00:00:00 2001 From: Noah Sparks Date: Wed, 6 Dec 2023 14:53:38 -0500 Subject: [PATCH 050/438] add changelog entry --- .changelog/34774.txt | 3 + internal/service/appmesh/appmesh_test.go | 164 +++++++++++------------ 2 files changed, 85 insertions(+), 82 deletions(-) create mode 100644 .changelog/34774.txt diff --git a/.changelog/34774.txt b/.changelog/34774.txt new file mode 100644 index 00000000000..6bf9ca533d3 --- /dev/null +++ b/.changelog/34774.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/appmesh_virtual_node: Remove limit of 50 backends per virtual node +``` diff --git a/internal/service/appmesh/appmesh_test.go b/internal/service/appmesh/appmesh_test.go index 4f6bf3d3407..3163d6c0311 100644 --- a/internal/service/appmesh/appmesh_test.go +++ b/internal/service/appmesh/appmesh_test.go @@ -13,73 +13,73 @@ func TestAccAppMesh_serial(t *testing.T) { t.Parallel() testCases := map[string]map[string]func(t *testing.T){ - "GatewayRoute": { - "basic": testAccGatewayRoute_basic, - "disappears": testAccGatewayRoute_disappears, - "grpcRoute": 
testAccGatewayRoute_grpcRoute, - "grpcRouteTargetPort": testAccGatewayRoute_grpcRouteTargetPort, - "grpcRouteWithPort": testAccGatewayRoute_grpcRouteWithPort, - "httpRoute": testAccGatewayRoute_httpRoute, - "httpRouteTargetPort": testAccGatewayRoute_httpRouteTargetPort, - "httpRouteWithPath": testAccGatewayRoute_httpRouteWithPath, - "httpRouteWithPort": testAccGatewayRoute_httpRouteWithPort, - "http2Route": testAccGatewayRoute_http2Route, - "http2RouteTargetPort": testAccGatewayRoute_http2RouteTargetPort, - "http2RouteWithPort": testAccGatewayRoute_http2RouteWithPort, - "http2RouteWithQueryParameter": testAccGatewayRoute_http2RouteWithQueryParameter, - "tags": testAccGatewayRoute_tags, - "dataSourceBasic": testAccGatewayRouteDataSource_basic, - }, - "Mesh": { - "basic": testAccMesh_basic, - "disappears": testAccMesh_disappears, - "egressFilter": testAccMesh_egressFilter, - "tags": testAccMesh_tags, - "dataSourceBasic": testAccMeshDataSource_basic, - "dataSourceMeshOwner": testAccMeshDataSource_meshOwner, - "dataSourceSpecAndTagsSet": testAccMeshDataSource_specAndTagsSet, - "dataSourceShared": testAccMeshDataSource_shared, - }, - "Route": { - "disappears": testAccRoute_disappears, - "grpcRoute": testAccRoute_grpcRoute, - "grpcRouteWithPortMatch": testAccRoute_grpcRouteWithPortMatch, - "grpcRouteEmptyMatch": testAccRoute_grpcRouteEmptyMatch, - "grpcRouteTimeout": testAccRoute_grpcRouteTimeout, - "http2Route": testAccRoute_http2Route, - "http2RouteWithPathMatch": testAccRoute_http2RouteWithPathMatch, - "http2RouteWithPortMatch": testAccRoute_http2RouteWithPortMatch, - "http2RouteTimeout": testAccRoute_http2RouteTimeout, - "httpHeader": testAccRoute_httpHeader, - "httpRetryPolicy": testAccRoute_httpRetryPolicy, - "httpRoute": testAccRoute_httpRoute, - "httpRouteWithPortMatch": testAccRoute_httpRouteWithPortMatch, - "httpRouteWithQueryParameterMatch": testAccRoute_httpRouteWithQueryParameterMatch, - "httpRouteTimeout": testAccRoute_httpRouteTimeout, - "routePriority": 
testAccRoute_routePriority, - "tcpRoute": testAccRoute_tcpRoute, - "tcpRouteWithPortMatch": testAccRoute_tcpRouteWithPortMatch, - "tcpRouteTimeout": testAccRoute_tcpRouteTimeout, - "tags": testAccRoute_tags, - "dataSourceHTTP2Route": testAccRouteDataSource_http2Route, - "dataSourceHTTPRoute": testAccRouteDataSource_httpRoute, - "dataSourceGRPCRoute": testAccRouteDataSource_grpcRoute, - "dataSourceTCPRoute": testAccRouteDataSource_tcpRoute, - }, - "VirtualGateway": { - "basic": testAccVirtualGateway_basic, - "disappears": testAccVirtualGateway_disappears, - "backendDefaults": testAccVirtualGateway_BackendDefaults, - "backendDefaultsCertificate": testAccVirtualGateway_BackendDefaultsCertificate, - "listenerConnectionPool": testAccVirtualGateway_ListenerConnectionPool, - "listenerHealthChecks": testAccVirtualGateway_ListenerHealthChecks, - "listenerTls": testAccVirtualGateway_ListenerTLS, - "listenerValidation": testAccVirtualGateway_ListenerValidation, - "multiListenerValidation": testAccVirtualGateway_MultiListenerValidation, - "logging": testAccVirtualGateway_Logging, - "tags": testAccVirtualGateway_Tags, - "dataSourceBasic": testAccVirtualGatewayDataSource_basic, - }, + // "GatewayRoute": { + // "basic": testAccGatewayRoute_basic, + // "disappears": testAccGatewayRoute_disappears, + // "grpcRoute": testAccGatewayRoute_grpcRoute, + // "grpcRouteTargetPort": testAccGatewayRoute_grpcRouteTargetPort, + // "grpcRouteWithPort": testAccGatewayRoute_grpcRouteWithPort, + // "httpRoute": testAccGatewayRoute_httpRoute, + // "httpRouteTargetPort": testAccGatewayRoute_httpRouteTargetPort, + // "httpRouteWithPath": testAccGatewayRoute_httpRouteWithPath, + // "httpRouteWithPort": testAccGatewayRoute_httpRouteWithPort, + // "http2Route": testAccGatewayRoute_http2Route, + // "http2RouteTargetPort": testAccGatewayRoute_http2RouteTargetPort, + // "http2RouteWithPort": testAccGatewayRoute_http2RouteWithPort, + // "http2RouteWithQueryParameter": 
testAccGatewayRoute_http2RouteWithQueryParameter, + // "tags": testAccGatewayRoute_tags, + // "dataSourceBasic": testAccGatewayRouteDataSource_basic, + // }, + // "Mesh": { + // "basic": testAccMesh_basic, + // "disappears": testAccMesh_disappears, + // "egressFilter": testAccMesh_egressFilter, + // "tags": testAccMesh_tags, + // "dataSourceBasic": testAccMeshDataSource_basic, + // "dataSourceMeshOwner": testAccMeshDataSource_meshOwner, + // "dataSourceSpecAndTagsSet": testAccMeshDataSource_specAndTagsSet, + // "dataSourceShared": testAccMeshDataSource_shared, + // }, + // "Route": { + // "disappears": testAccRoute_disappears, + // "grpcRoute": testAccRoute_grpcRoute, + // "grpcRouteWithPortMatch": testAccRoute_grpcRouteWithPortMatch, + // "grpcRouteEmptyMatch": testAccRoute_grpcRouteEmptyMatch, + // "grpcRouteTimeout": testAccRoute_grpcRouteTimeout, + // "http2Route": testAccRoute_http2Route, + // "http2RouteWithPathMatch": testAccRoute_http2RouteWithPathMatch, + // "http2RouteWithPortMatch": testAccRoute_http2RouteWithPortMatch, + // "http2RouteTimeout": testAccRoute_http2RouteTimeout, + // "httpHeader": testAccRoute_httpHeader, + // "httpRetryPolicy": testAccRoute_httpRetryPolicy, + // "httpRoute": testAccRoute_httpRoute, + // "httpRouteWithPortMatch": testAccRoute_httpRouteWithPortMatch, + // "httpRouteWithQueryParameterMatch": testAccRoute_httpRouteWithQueryParameterMatch, + // "httpRouteTimeout": testAccRoute_httpRouteTimeout, + // "routePriority": testAccRoute_routePriority, + // "tcpRoute": testAccRoute_tcpRoute, + // "tcpRouteWithPortMatch": testAccRoute_tcpRouteWithPortMatch, + // "tcpRouteTimeout": testAccRoute_tcpRouteTimeout, + // "tags": testAccRoute_tags, + // "dataSourceHTTP2Route": testAccRouteDataSource_http2Route, + // "dataSourceHTTPRoute": testAccRouteDataSource_httpRoute, + // "dataSourceGRPCRoute": testAccRouteDataSource_grpcRoute, + // "dataSourceTCPRoute": testAccRouteDataSource_tcpRoute, + // }, + // "VirtualGateway": { + // "basic": 
testAccVirtualGateway_basic, + // "disappears": testAccVirtualGateway_disappears, + // "backendDefaults": testAccVirtualGateway_BackendDefaults, + // "backendDefaultsCertificate": testAccVirtualGateway_BackendDefaultsCertificate, + // "listenerConnectionPool": testAccVirtualGateway_ListenerConnectionPool, + // "listenerHealthChecks": testAccVirtualGateway_ListenerHealthChecks, + // "listenerTls": testAccVirtualGateway_ListenerTLS, + // "listenerValidation": testAccVirtualGateway_ListenerValidation, + // "multiListenerValidation": testAccVirtualGateway_MultiListenerValidation, + // "logging": testAccVirtualGateway_Logging, + // "tags": testAccVirtualGateway_Tags, + // "dataSourceBasic": testAccVirtualGatewayDataSource_basic, + // }, "VirtualNode": { "basic": testAccVirtualNode_basic, "disappears": testAccVirtualNode_disappears, @@ -99,21 +99,21 @@ func TestAccAppMesh_serial(t *testing.T) { "tags": testAccVirtualNode_tags, "dataSourceBasic": testAccVirtualNodeDataSource_basic, }, - "VirtualRouter": { - "basic": testAccVirtualRouter_basic, - "disappears": testAccVirtualRouter_disappears, - "multiListener": testAccVirtualRouter_multiListener, - "tags": testAccVirtualRouter_tags, - "dataSourceBasic": testAccVirtualRouterDataSource_basic, - }, - "VirtualService": { - "disappears": testAccVirtualService_disappears, - "virtualNode": testAccVirtualService_virtualNode, - "virtualRouter": testAccVirtualService_virtualRouter, - "tags": testAccVirtualService_tags, - "dataSourceVirtualNode": testAccVirtualServiceDataSource_virtualNode, - "dataSourceVirtualRouter": testAccVirtualServiceDataSource_virtualRouter, - }, + // "VirtualRouter": { + // "basic": testAccVirtualRouter_basic, + // "disappears": testAccVirtualRouter_disappears, + // "multiListener": testAccVirtualRouter_multiListener, + // "tags": testAccVirtualRouter_tags, + // "dataSourceBasic": testAccVirtualRouterDataSource_basic, + // }, + // "VirtualService": { + // "disappears": testAccVirtualService_disappears, + // 
"virtualNode": testAccVirtualService_virtualNode, + // "virtualRouter": testAccVirtualService_virtualRouter, + // "tags": testAccVirtualService_tags, + // "dataSourceVirtualNode": testAccVirtualServiceDataSource_virtualNode, + // "dataSourceVirtualRouter": testAccVirtualServiceDataSource_virtualRouter, + // }, } acctest.RunSerialTests2Levels(t, testCases, 0) From 1977be961d0fe9106a46d1820cd58d50e1f763b3 Mon Sep 17 00:00:00 2001 From: Noah Sparks Date: Wed, 6 Dec 2023 14:54:12 -0500 Subject: [PATCH 051/438] undo commented tests --- internal/service/appmesh/appmesh_test.go | 164 +++++++++++------------ 1 file changed, 82 insertions(+), 82 deletions(-) diff --git a/internal/service/appmesh/appmesh_test.go b/internal/service/appmesh/appmesh_test.go index 3163d6c0311..4f6bf3d3407 100644 --- a/internal/service/appmesh/appmesh_test.go +++ b/internal/service/appmesh/appmesh_test.go @@ -13,73 +13,73 @@ func TestAccAppMesh_serial(t *testing.T) { t.Parallel() testCases := map[string]map[string]func(t *testing.T){ - // "GatewayRoute": { - // "basic": testAccGatewayRoute_basic, - // "disappears": testAccGatewayRoute_disappears, - // "grpcRoute": testAccGatewayRoute_grpcRoute, - // "grpcRouteTargetPort": testAccGatewayRoute_grpcRouteTargetPort, - // "grpcRouteWithPort": testAccGatewayRoute_grpcRouteWithPort, - // "httpRoute": testAccGatewayRoute_httpRoute, - // "httpRouteTargetPort": testAccGatewayRoute_httpRouteTargetPort, - // "httpRouteWithPath": testAccGatewayRoute_httpRouteWithPath, - // "httpRouteWithPort": testAccGatewayRoute_httpRouteWithPort, - // "http2Route": testAccGatewayRoute_http2Route, - // "http2RouteTargetPort": testAccGatewayRoute_http2RouteTargetPort, - // "http2RouteWithPort": testAccGatewayRoute_http2RouteWithPort, - // "http2RouteWithQueryParameter": testAccGatewayRoute_http2RouteWithQueryParameter, - // "tags": testAccGatewayRoute_tags, - // "dataSourceBasic": testAccGatewayRouteDataSource_basic, - // }, - // "Mesh": { - // "basic": testAccMesh_basic, - 
// "disappears": testAccMesh_disappears, - // "egressFilter": testAccMesh_egressFilter, - // "tags": testAccMesh_tags, - // "dataSourceBasic": testAccMeshDataSource_basic, - // "dataSourceMeshOwner": testAccMeshDataSource_meshOwner, - // "dataSourceSpecAndTagsSet": testAccMeshDataSource_specAndTagsSet, - // "dataSourceShared": testAccMeshDataSource_shared, - // }, - // "Route": { - // "disappears": testAccRoute_disappears, - // "grpcRoute": testAccRoute_grpcRoute, - // "grpcRouteWithPortMatch": testAccRoute_grpcRouteWithPortMatch, - // "grpcRouteEmptyMatch": testAccRoute_grpcRouteEmptyMatch, - // "grpcRouteTimeout": testAccRoute_grpcRouteTimeout, - // "http2Route": testAccRoute_http2Route, - // "http2RouteWithPathMatch": testAccRoute_http2RouteWithPathMatch, - // "http2RouteWithPortMatch": testAccRoute_http2RouteWithPortMatch, - // "http2RouteTimeout": testAccRoute_http2RouteTimeout, - // "httpHeader": testAccRoute_httpHeader, - // "httpRetryPolicy": testAccRoute_httpRetryPolicy, - // "httpRoute": testAccRoute_httpRoute, - // "httpRouteWithPortMatch": testAccRoute_httpRouteWithPortMatch, - // "httpRouteWithQueryParameterMatch": testAccRoute_httpRouteWithQueryParameterMatch, - // "httpRouteTimeout": testAccRoute_httpRouteTimeout, - // "routePriority": testAccRoute_routePriority, - // "tcpRoute": testAccRoute_tcpRoute, - // "tcpRouteWithPortMatch": testAccRoute_tcpRouteWithPortMatch, - // "tcpRouteTimeout": testAccRoute_tcpRouteTimeout, - // "tags": testAccRoute_tags, - // "dataSourceHTTP2Route": testAccRouteDataSource_http2Route, - // "dataSourceHTTPRoute": testAccRouteDataSource_httpRoute, - // "dataSourceGRPCRoute": testAccRouteDataSource_grpcRoute, - // "dataSourceTCPRoute": testAccRouteDataSource_tcpRoute, - // }, - // "VirtualGateway": { - // "basic": testAccVirtualGateway_basic, - // "disappears": testAccVirtualGateway_disappears, - // "backendDefaults": testAccVirtualGateway_BackendDefaults, - // "backendDefaultsCertificate": 
testAccVirtualGateway_BackendDefaultsCertificate, - // "listenerConnectionPool": testAccVirtualGateway_ListenerConnectionPool, - // "listenerHealthChecks": testAccVirtualGateway_ListenerHealthChecks, - // "listenerTls": testAccVirtualGateway_ListenerTLS, - // "listenerValidation": testAccVirtualGateway_ListenerValidation, - // "multiListenerValidation": testAccVirtualGateway_MultiListenerValidation, - // "logging": testAccVirtualGateway_Logging, - // "tags": testAccVirtualGateway_Tags, - // "dataSourceBasic": testAccVirtualGatewayDataSource_basic, - // }, + "GatewayRoute": { + "basic": testAccGatewayRoute_basic, + "disappears": testAccGatewayRoute_disappears, + "grpcRoute": testAccGatewayRoute_grpcRoute, + "grpcRouteTargetPort": testAccGatewayRoute_grpcRouteTargetPort, + "grpcRouteWithPort": testAccGatewayRoute_grpcRouteWithPort, + "httpRoute": testAccGatewayRoute_httpRoute, + "httpRouteTargetPort": testAccGatewayRoute_httpRouteTargetPort, + "httpRouteWithPath": testAccGatewayRoute_httpRouteWithPath, + "httpRouteWithPort": testAccGatewayRoute_httpRouteWithPort, + "http2Route": testAccGatewayRoute_http2Route, + "http2RouteTargetPort": testAccGatewayRoute_http2RouteTargetPort, + "http2RouteWithPort": testAccGatewayRoute_http2RouteWithPort, + "http2RouteWithQueryParameter": testAccGatewayRoute_http2RouteWithQueryParameter, + "tags": testAccGatewayRoute_tags, + "dataSourceBasic": testAccGatewayRouteDataSource_basic, + }, + "Mesh": { + "basic": testAccMesh_basic, + "disappears": testAccMesh_disappears, + "egressFilter": testAccMesh_egressFilter, + "tags": testAccMesh_tags, + "dataSourceBasic": testAccMeshDataSource_basic, + "dataSourceMeshOwner": testAccMeshDataSource_meshOwner, + "dataSourceSpecAndTagsSet": testAccMeshDataSource_specAndTagsSet, + "dataSourceShared": testAccMeshDataSource_shared, + }, + "Route": { + "disappears": testAccRoute_disappears, + "grpcRoute": testAccRoute_grpcRoute, + "grpcRouteWithPortMatch": testAccRoute_grpcRouteWithPortMatch, + 
"grpcRouteEmptyMatch": testAccRoute_grpcRouteEmptyMatch, + "grpcRouteTimeout": testAccRoute_grpcRouteTimeout, + "http2Route": testAccRoute_http2Route, + "http2RouteWithPathMatch": testAccRoute_http2RouteWithPathMatch, + "http2RouteWithPortMatch": testAccRoute_http2RouteWithPortMatch, + "http2RouteTimeout": testAccRoute_http2RouteTimeout, + "httpHeader": testAccRoute_httpHeader, + "httpRetryPolicy": testAccRoute_httpRetryPolicy, + "httpRoute": testAccRoute_httpRoute, + "httpRouteWithPortMatch": testAccRoute_httpRouteWithPortMatch, + "httpRouteWithQueryParameterMatch": testAccRoute_httpRouteWithQueryParameterMatch, + "httpRouteTimeout": testAccRoute_httpRouteTimeout, + "routePriority": testAccRoute_routePriority, + "tcpRoute": testAccRoute_tcpRoute, + "tcpRouteWithPortMatch": testAccRoute_tcpRouteWithPortMatch, + "tcpRouteTimeout": testAccRoute_tcpRouteTimeout, + "tags": testAccRoute_tags, + "dataSourceHTTP2Route": testAccRouteDataSource_http2Route, + "dataSourceHTTPRoute": testAccRouteDataSource_httpRoute, + "dataSourceGRPCRoute": testAccRouteDataSource_grpcRoute, + "dataSourceTCPRoute": testAccRouteDataSource_tcpRoute, + }, + "VirtualGateway": { + "basic": testAccVirtualGateway_basic, + "disappears": testAccVirtualGateway_disappears, + "backendDefaults": testAccVirtualGateway_BackendDefaults, + "backendDefaultsCertificate": testAccVirtualGateway_BackendDefaultsCertificate, + "listenerConnectionPool": testAccVirtualGateway_ListenerConnectionPool, + "listenerHealthChecks": testAccVirtualGateway_ListenerHealthChecks, + "listenerTls": testAccVirtualGateway_ListenerTLS, + "listenerValidation": testAccVirtualGateway_ListenerValidation, + "multiListenerValidation": testAccVirtualGateway_MultiListenerValidation, + "logging": testAccVirtualGateway_Logging, + "tags": testAccVirtualGateway_Tags, + "dataSourceBasic": testAccVirtualGatewayDataSource_basic, + }, "VirtualNode": { "basic": testAccVirtualNode_basic, "disappears": testAccVirtualNode_disappears, @@ -99,21 +99,21 @@ 
func TestAccAppMesh_serial(t *testing.T) { "tags": testAccVirtualNode_tags, "dataSourceBasic": testAccVirtualNodeDataSource_basic, }, - // "VirtualRouter": { - // "basic": testAccVirtualRouter_basic, - // "disappears": testAccVirtualRouter_disappears, - // "multiListener": testAccVirtualRouter_multiListener, - // "tags": testAccVirtualRouter_tags, - // "dataSourceBasic": testAccVirtualRouterDataSource_basic, - // }, - // "VirtualService": { - // "disappears": testAccVirtualService_disappears, - // "virtualNode": testAccVirtualService_virtualNode, - // "virtualRouter": testAccVirtualService_virtualRouter, - // "tags": testAccVirtualService_tags, - // "dataSourceVirtualNode": testAccVirtualServiceDataSource_virtualNode, - // "dataSourceVirtualRouter": testAccVirtualServiceDataSource_virtualRouter, - // }, + "VirtualRouter": { + "basic": testAccVirtualRouter_basic, + "disappears": testAccVirtualRouter_disappears, + "multiListener": testAccVirtualRouter_multiListener, + "tags": testAccVirtualRouter_tags, + "dataSourceBasic": testAccVirtualRouterDataSource_basic, + }, + "VirtualService": { + "disappears": testAccVirtualService_disappears, + "virtualNode": testAccVirtualService_virtualNode, + "virtualRouter": testAccVirtualService_virtualRouter, + "tags": testAccVirtualService_tags, + "dataSourceVirtualNode": testAccVirtualServiceDataSource_virtualNode, + "dataSourceVirtualRouter": testAccVirtualServiceDataSource_virtualRouter, + }, } acctest.RunSerialTests2Levels(t, testCases, 0) From 900a92aeb357fd99b6bc3aa60af4204c5352f8ad Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Fri, 8 Dec 2023 16:11:16 +0100 Subject: [PATCH 052/438] feat: added resource tests and docs --- .../ssoadmin/application_access_scope.go | 193 ++++++++++++++++++ .../ssoadmin/application_access_scope_test.go | 153 ++++++++++++++ .../service/ssoadmin/service_package_gen.go | 4 + ...min_application_access_scope.html.markdown | 64 ++++++ 4 files changed, 414 insertions(+) create mode 100644 
internal/service/ssoadmin/application_access_scope.go create mode 100644 internal/service/ssoadmin/application_access_scope_test.go create mode 100644 website/docs/r/ssoadmin_application_access_scope.html.markdown diff --git a/internal/service/ssoadmin/application_access_scope.go b/internal/service/ssoadmin/application_access_scope.go new file mode 100644 index 00000000000..228f6a918c3 --- /dev/null +++ b/internal/service/ssoadmin/application_access_scope.go @@ -0,0 +1,193 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ssoadmin + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ssoadmin" + "github.com/aws/aws-sdk-go-v2/service/ssoadmin/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" +) + +// @SDKResource("aws_ssoadmin_application_access_scope") +func ResourceApplicationAccessScope() *schema.Resource { + return &schema.Resource{ + CreateWithoutTimeout: resourceApplicationAccessScopeCreate, + ReadWithoutTimeout: resourceApplicationAccessScopeRead, + DeleteWithoutTimeout: resourceApplicationAccessScopeDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(10 * time.Minute), + Delete: schema.DefaultTimeout(10 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "application_arn": { + Type: 
schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidARN, + }, + "authorized_targets": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: verify.ValidARN, + }, + }, + "scope": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceApplicationAccessScopeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).SSOAdminClient(ctx) + + applicationARN := d.Get("application_arn").(string) + scope := d.Get("scope").(string) + id := ApplicationAccessScopeCreateResourceID(applicationARN, scope) + + input := &ssoadmin.PutApplicationAccessScopeInput{ + ApplicationArn: aws.String(applicationARN), + Scope: aws.String(scope), + } + + if v, ok := d.GetOk("authorized_targets"); ok { + input.AuthorizedTargets = flex.ExpandStringValueList(v.([]interface{})) + } + + _, err := conn.PutApplicationAccessScope(ctx, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "creating SSO Application Access Scope (%s): %s", id, err) + } + + d.SetId(id) + + return append(diags, resourceApplicationAccessScopeRead(ctx, d, meta)...) 
+} + +func resourceApplicationAccessScopeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).SSOAdminClient(ctx) + + applicationARN, scope, err := ApplicationAccessScopeParseResourceID(d.Id()) + if err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + + output, err := FindApplicationAccessScopeByScopeAndApplicationARN(ctx, conn, applicationARN, scope) + + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] SSO Application Access Scope (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading SSO Application Access Scope (%s): %s", d.Id(), err) + } + + d.Set("application_arn", applicationARN) + d.Set("scope", output.Scope) + d.Set("authorized_targets", output.AuthorizedTargets) + + return diags +} + +func resourceApplicationAccessScopeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).SSOAdminClient(ctx) + + applicationARN, scope, err := ApplicationAccessScopeParseResourceID(d.Id()) + if err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + + log.Printf("[INFO] Deleting SSO Application Access Scope: %s", d.Id()) + _, err = conn.DeleteApplicationAccessScope(ctx, &ssoadmin.DeleteApplicationAccessScopeInput{ + ApplicationArn: aws.String(applicationARN), + Scope: aws.String(scope), + }) + + if errs.IsA[*types.ResourceNotFoundException](err) { + return diags + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting SSO Application Access Scope (%s): %s", d.Id(), err) + } + + return diags +} + +const applicationAccessScopeIDSeparator = "," + +func ApplicationAccessScopeCreateResourceID(applicationARN, scope string) string { + parts := []string{applicationARN, scope} + id := strings.Join(parts, applicationAccessScopeIDSeparator) + + return id 
+} + +func ApplicationAccessScopeParseResourceID(id string) (string, string, error) { + parts := strings.Split(id, applicationAccessScopeIDSeparator) + + if len(parts) == 2 && parts[0] != "" && parts[1] != "" { + return parts[0], parts[1], nil + } + + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected APPLICATION_ARN%[2]sSCOPE%[2]s", id, applicationAccessScopeIDSeparator) +} + +func FindApplicationAccessScopeByScopeAndApplicationARN(ctx context.Context, conn *ssoadmin.Client, applicationARN, scope string) (*ssoadmin.GetApplicationAccessScopeOutput, error) { + input := &ssoadmin.GetApplicationAccessScopeInput{ + ApplicationArn: aws.String(applicationARN), + Scope: aws.String(scope), + } + + output, err := conn.GetApplicationAccessScope(ctx, input) + + if errs.IsA[*types.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} diff --git a/internal/service/ssoadmin/application_access_scope_test.go b/internal/service/ssoadmin/application_access_scope_test.go new file mode 100644 index 00000000000..55a294ce555 --- /dev/null +++ b/internal/service/ssoadmin/application_access_scope_test.go @@ -0,0 +1,153 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package ssoadmin_test + +import ( + "context" + "fmt" + "testing" + + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/service/ssoadmin" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSSOAdminApplicationAccessScope_basic(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ssoadmin_application_access_scope.test" + applicationResourceName := "aws_ssoadmin_application.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSOAdminEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckApplicationAccessScopeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccApplicationAccessScopeConfig_basic(rName, "sso:account:access"), + Check: resource.ComposeTestCheckFunc( + testAccCheckApplicationAccessScopeExists(ctx, resourceName), + resource.TestCheckResourceAttrPair(resourceName, "application_arn", applicationResourceName, "application_arn"), + resource.TestCheckResourceAttr(resourceName, "scope", "sso:account:access"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateIdFunc: testAccApplicationAccessScopeImportStateIdFunc(resourceName), + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccSSOAdminApplicationAccessScope_disappears(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := 
"aws_ssoadmin_application_access_scope.test" + applicationResourceName := "aws_ssoadmin_application.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSOAdminEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckApplicationAccessScopeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccApplicationAccessScopeConfig_basic(rName, "sso:account:access"), + Check: resource.ComposeTestCheckFunc( + testAccCheckApplicationAccessScopeExists(ctx, resourceName), + acctest.CheckResourceDisappears(ctx, acctest.Provider, ssoadmin.ResourceApplicationAccessScope(), applicationResourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckApplicationAccessScopeExists(ctx context.Context, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).SSOAdminClient(ctx) + + applicationARN, scope, err := ssoadmin.ApplicationAccessScopeParseResourceID(rs.Primary.ID) + if err != nil { + return err + } + + _, err = ssoadmin.FindApplicationAccessScopeByScopeAndApplicationARN(ctx, conn, applicationARN, scope) + + return err + } +} + +func testAccCheckApplicationAccessScopeDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).SSOAdminClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_ssoadmin_application_access_scope" { + continue + } + + var applicationARN, scope, err = ssoadmin.ApplicationAccessScopeParseResourceID(rs.Primary.ID) + if err != nil { + return err + } + + _, err = ssoadmin.FindApplicationAccessScopeByScopeAndApplicationARN(ctx, conn, applicationARN, scope) + + if tfresource.NotFound(err) { + continue + } + + if err 
!= nil { + return err + } + + return fmt.Errorf("SSO Application Access Scope %s still exists", rs.Primary.ID) + } + + return nil + } +} + +func testAccApplicationAccessScopeImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { + return func(s *terraform.State) (string, error) { + rs, ok := s.RootModule().Resources[resourceName] + if !ok { + return "", fmt.Errorf("Not Found: %s", resourceName) + } + + return fmt.Sprintf("%s,%s", rs.Primary.Attributes["application_arn"], rs.Primary.Attributes["scope"]), nil + } +} + +func testAccApplicationAccessScopeConfig_basic(rName, scope string) string { + return fmt.Sprintf(` +data "aws_ssoadmin_instances" "test" {} + +resource "aws_ssoadmin_application" "test" { + name = %[1]q + application_provider_arn = %[2]q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] +} + +resource "aws_ssoadmin_application_access_scope" "test" { + application_arn = aws_ssoadmin_application.test.application_arn + authorized_targets = [aws_ssoadmin_application.test.application_arn] + scope = %[3]q +} +`, rName, testAccApplicationProviderARN, scope) +} diff --git a/internal/service/ssoadmin/service_package_gen.go b/internal/service/ssoadmin/service_package_gen.go index 882e10d2b48..839d43a4be2 100644 --- a/internal/service/ssoadmin/service_package_gen.go +++ b/internal/service/ssoadmin/service_package_gen.go @@ -62,6 +62,10 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka Factory: ResourceAccountAssignment, TypeName: "aws_ssoadmin_account_assignment", }, + { + Factory: ResourceApplicationAccessScope, + TypeName: "aws_ssoadmin_application_access_scope", + }, { Factory: ResourceCustomerManagedPolicyAttachment, TypeName: "aws_ssoadmin_customer_managed_policy_attachment", diff --git a/website/docs/r/ssoadmin_application_access_scope.html.markdown b/website/docs/r/ssoadmin_application_access_scope.html.markdown new file mode 100644 index 00000000000..c65ea868bc8 --- /dev/null +++ 
b/website/docs/r/ssoadmin_application_access_scope.html.markdown @@ -0,0 +1,64 @@ +--- +subcategory: "SSO Admin" +layout: "aws" +page_title: "AWS: aws_ssoadmin_application_access_scope" +description: |- + Terraform resource for managing an AWS SSO Admin Application Access Scope. +--- +# Resource: aws_ssoadmin_application_access_scope + +Terraform resource for managing an AWS SSO Admin Application Access Scope. + +## Example Usage + +### Basic Usage + +```terraform +data "aws_ssoadmin_instances" "example" {} + +resource "aws_ssoadmin_application" "example" { + name = "example" + application_provider_arn = "arn:aws:sso::aws:applicationProvider/custom" + instance_arn = tolist(data.aws_ssoadmin_instances.example.arns)[0] +} + +resource "aws_ssoadmin_application_access_scope" "test" { + application_arn = aws_ssoadmin_application.test.application_arn + authorized_targets = [ "arn:aws:sso::012345678901:application/ssoins-012345678901/apl-012345678901" ] + scope = "sso:account:access" +} +``` + +## Argument Reference + +The following arguments are required: + +* `application_arn` - (Required) Specifies the ARN of the application with the access scope with the targets to add or update. +* `scope` - (Required) Specifies the name of the access scope to be associated with the specified targets. + +The following arguments are optional: + +* `authorized_targets` - (Optional) Specifies an array list of ARNs that represent the authorized targets for this access scope. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `id` - ARN of the application. + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSO Admin Application Access Scope using the `id`. 
For example: + +```terraform +import { + to = aws_ssoadmin_application_access_scope.example + id = "arn:aws:sso::012345678901:application/ssoins-012345678901/apl-012345678901,sso:account:access" +} +``` + +Using `terraform import`, import SSO Admin Application Access Scope using the `id`. For example: + +```console +% terraform import aws_ssoadmin_application_access_scope.example arn:aws:sso::012345678901:application/ssoins-012345678901/apl-012345678901,sso:account:access +``` From 6d41c27d409f6790ae0bc70b0117347742da032b Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Fri, 8 Dec 2023 16:24:59 +0100 Subject: [PATCH 053/438] feat: fix dissapear test --- internal/service/ssoadmin/application_access_scope.go | 2 +- internal/service/ssoadmin/application_access_scope_test.go | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/internal/service/ssoadmin/application_access_scope.go b/internal/service/ssoadmin/application_access_scope.go index 228f6a918c3..325a6c9d770 100644 --- a/internal/service/ssoadmin/application_access_scope.go +++ b/internal/service/ssoadmin/application_access_scope.go @@ -163,7 +163,7 @@ func ApplicationAccessScopeParseResourceID(id string) (string, string, error) { return parts[0], parts[1], nil } - return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected APPLICATION_ARN%[2]sSCOPE%[2]s", id, applicationAccessScopeIDSeparator) + return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected APPLICATION_ARN%[2]sSCOPE", id, applicationAccessScopeIDSeparator) } func FindApplicationAccessScopeByScopeAndApplicationARN(ctx context.Context, conn *ssoadmin.Client, applicationARN, scope string) (*ssoadmin.GetApplicationAccessScopeOutput, error) { diff --git a/internal/service/ssoadmin/application_access_scope_test.go b/internal/service/ssoadmin/application_access_scope_test.go index 55a294ce555..534d378ef7f 100644 --- a/internal/service/ssoadmin/application_access_scope_test.go +++ 
b/internal/service/ssoadmin/application_access_scope_test.go @@ -52,7 +52,6 @@ func TestAccSSOAdminApplicationAccessScope_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ssoadmin_application_access_scope.test" - applicationResourceName := "aws_ssoadmin_application.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -64,7 +63,7 @@ func TestAccSSOAdminApplicationAccessScope_disappears(t *testing.T) { Config: testAccApplicationAccessScopeConfig_basic(rName, "sso:account:access"), Check: resource.ComposeTestCheckFunc( testAccCheckApplicationAccessScopeExists(ctx, resourceName), - acctest.CheckResourceDisappears(ctx, acctest.Provider, ssoadmin.ResourceApplicationAccessScope(), applicationResourceName), + acctest.CheckResourceDisappears(ctx, acctest.Provider, ssoadmin.ResourceApplicationAccessScope(), resourceName), ), ExpectNonEmptyPlan: true, }, From 6956422bfad4ce76eadf3d1dc3248c43bb365df0 Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Fri, 8 Dec 2023 16:26:37 +0100 Subject: [PATCH 054/438] chore: added changelog --- .changelog/34811.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/34811.txt diff --git a/.changelog/34811.txt b/.changelog/34811.txt new file mode 100644 index 00000000000..9089df669d9 --- /dev/null +++ b/.changelog/34811.txt @@ -0,0 +1,3 @@ +```release-note:new-data-source +aws_ssoadmin_application_access_scope +``` \ No newline at end of file From 914d70eb71423a2cadc9622ca474315e3fcb0bee Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Fri, 8 Dec 2023 16:29:13 +0100 Subject: [PATCH 055/438] chore: fmt examples --- website/docs/r/ssoadmin_application_access_scope.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/ssoadmin_application_access_scope.html.markdown b/website/docs/r/ssoadmin_application_access_scope.html.markdown index 
c65ea868bc8..d00481b9a0c 100644 --- a/website/docs/r/ssoadmin_application_access_scope.html.markdown +++ b/website/docs/r/ssoadmin_application_access_scope.html.markdown @@ -24,7 +24,7 @@ resource "aws_ssoadmin_application" "example" { resource "aws_ssoadmin_application_access_scope" "test" { application_arn = aws_ssoadmin_application.test.application_arn - authorized_targets = [ "arn:aws:sso::012345678901:application/ssoins-012345678901/apl-012345678901" ] + authorized_targets = ["arn:aws:sso::012345678901:application/ssoins-012345678901/apl-012345678901"] scope = "sso:account:access" } ``` From bd356b874e997b8e9e628ca2dfb417094efa1d63 Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Fri, 8 Dec 2023 17:55:02 +0100 Subject: [PATCH 056/438] chore: correted changelog --- .changelog/34811.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changelog/34811.txt b/.changelog/34811.txt index 9089df669d9..4e8547747df 100644 --- a/.changelog/34811.txt +++ b/.changelog/34811.txt @@ -1,3 +1,3 @@ -```release-note:new-data-source +```release-note:new-resource aws_ssoadmin_application_access_scope ``` \ No newline at end of file From a021529946157c30e2072b4d1cad659323f20f55 Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Fri, 8 Dec 2023 19:29:36 +0100 Subject: [PATCH 057/438] fmt: fixed docs --- .../docs/r/ssoadmin_application_access_scope.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/r/ssoadmin_application_access_scope.html.markdown b/website/docs/r/ssoadmin_application_access_scope.html.markdown index d00481b9a0c..cf263e39d8f 100644 --- a/website/docs/r/ssoadmin_application_access_scope.html.markdown +++ b/website/docs/r/ssoadmin_application_access_scope.html.markdown @@ -22,8 +22,8 @@ resource "aws_ssoadmin_application" "example" { instance_arn = tolist(data.aws_ssoadmin_instances.example.arns)[0] } -resource "aws_ssoadmin_application_access_scope" "test" { - application_arn = 
aws_ssoadmin_application.test.application_arn +resource "aws_ssoadmin_application_access_scope" "example" { + application_arn = aws_ssoadmin_application.example.application_arn authorized_targets = ["arn:aws:sso::012345678901:application/ssoins-012345678901/apl-012345678901"] scope = "sso:account:access" } From 3f1bc775819d3cc37e19cc528320deec09dc9fcf Mon Sep 17 00:00:00 2001 From: David Hwang Date: Fri, 8 Dec 2023 15:02:11 -0500 Subject: [PATCH 058/438] Adds kx-dataview resource --- internal/service/finspace/kx_dataview.go | 427 ++++++++++++++++++ internal/service/finspace/kx_dataview_test.go | 241 ++++++++++ .../service/finspace/service_package_gen.go | 8 + .../docs/r/finspace_kx_dataview.html.markdown | 91 ++++ 4 files changed, 767 insertions(+) create mode 100644 internal/service/finspace/kx_dataview.go create mode 100644 internal/service/finspace/kx_dataview_test.go create mode 100644 website/docs/r/finspace_kx_dataview.html.markdown diff --git a/internal/service/finspace/kx_dataview.go b/internal/service/finspace/kx_dataview.go new file mode 100644 index 00000000000..b18aaa8f7e8 --- /dev/null +++ b/internal/service/finspace/kx_dataview.go @@ -0,0 +1,427 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package finspace + +import ( + "context" + "errors" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/finspace" + "github.com/aws/aws-sdk-go-v2/service/finspace/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" + "github.com/hashicorp/terraform-provider-aws/names" + "log" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" +) + +// @SDKResource("aws_finspace_kx_dataview", name="Kx Dataview") +// @Tags(identifierAttribute="arn") +func ResourceKxDataview() *schema.Resource { + + return &schema.Resource{ + CreateWithoutTimeout: resourceKxDataviewCreate, + ReadWithoutTimeout: resourceKxDataviewRead, + UpdateWithoutTimeout: resourceKxDataviewUpdate, + DeleteWithoutTimeout: resourceKxDataviewDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + }, + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "environment_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + 
ValidateFunc: validation.StringLenBetween(3, 63), + }, + "database_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 1000), + }, + "auto_update": { + Type: schema.TypeBool, + ForceNew: true, + Required: true, + }, + "changeset_id": { + Type: schema.TypeString, + Optional: true, + }, + "az_mode": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxAzMode](), + }, + "availability_zone_id": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "segment_configurations": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "volume_name": { + Type: schema.TypeString, + Required: true, + }, + "db_paths": { + Type: schema.TypeList, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Required: true, + }, + }, + }, + Optional: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "created_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "last_modified_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), + }, + CustomizeDiff: verify.SetTagsDiff, + } +} + +const ( + ResNameKxDataview = "Kx Dataview" + kxDataviewIdPartCount = 3 +) + +func resourceKxDataviewCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) + + idParts := []string{ + d.Get("environment_id").(string), + d.Get("database_name").(string), + d.Get("name").(string), + } + + rId, err := flex.FlattenResourceId(idParts, kxDataviewIdPartCount, false) + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionFlatteningResourceId, 
ResNameKxDataview, d.Get("name").(string), err)...) + } + d.SetId(rId) + + in := &finspace.CreateKxDataviewInput{ + DatabaseName: aws.String(d.Get("database_name").(string)), + DataviewName: aws.String(d.Get("name").(string)), + EnvironmentId: aws.String(d.Get("environment_id").(string)), + AutoUpdate: *aws.Bool(d.Get("auto_update").(bool)), + AzMode: types.KxAzMode(d.Get("az_mode").(string)), + ClientToken: aws.String(id.UniqueId()), + Tags: getTagsIn(ctx), + } + + if v, ok := d.GetOk("description"); ok { + in.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("changeset_id"); ok { + in.ChangesetId = aws.String(v.(string)) + } + + if v, ok := d.GetOk("availability_zone_id"); ok { + in.AvailabilityZoneId = aws.String(v.(string)) + } + + if v, ok := d.GetOk("segment_configurations"); ok && len(v.([]interface{})) > 0 { + in.SegmentConfigurations = expandSegmentConfigurations(v.([]interface{})) + } + + out, err := conn.CreateKxDataview(ctx, in) + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxDataview, d.Get("name").(string), err)...) + } + if out == nil || out.DataviewName == nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxDataview, d.Get("name").(string), errors.New("empty output"))...) + } + if _, err := waitKxDataviewCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxDataview, d.Get("name").(string), err)...) + } + + return append(diags, resourceKxDataviewRead(ctx, d, meta)...) 
+} + +func resourceKxDataviewRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) + + out, err := findKxDataviewById(ctx, conn, d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] FinSpace KxDataview (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags + } + + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionReading, ResNameKxDataview, d.Id(), err)...) + } + d.Set("name", out.DataviewName) + d.Set("description", out.Description) + d.Set("auto_update", out.AutoUpdate) + d.Set("changeset_id", out.ChangesetId) + d.Set("availability_zone_id", out.AvailabilityZoneId) + d.Set("status", out.Status) + d.Set("created_timestamp", out.CreatedTimestamp.String()) + d.Set("last_modified_timestamp", out.LastModifiedTimestamp.String()) + d.Set("database_name", out.DatabaseName) + d.Set("environment_id", out.EnvironmentId) + d.Set("az_mode", out.AzMode) + if err := d.Set("segment_configurations", flattenSegmentConfigurations(out.SegmentConfigurations)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionReading, ResNameKxDataview, d.Id(), err)...) 
+ } + + return diags +} + +func resourceKxDataviewUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) + in := &finspace.UpdateKxDataviewInput{ + EnvironmentId: aws.String(d.Get("environment_id").(string)), + DatabaseName: aws.String(d.Get("database_name").(string)), + DataviewName: aws.String(d.Get("name").(string)), + ClientToken: aws.String(id.UniqueId()), + } + + if v, ok := d.GetOk("changeset_id"); ok && d.HasChange("changeset_id") && d.Get("auto_update").(bool) != true { + in.ChangesetId = aws.String(v.(string)) + } + + if v, ok := d.GetOk("segment_configurations"); ok && len(v.([]interface{})) > 0 && d.HasChange("segment_configurations") { + in.SegmentConfigurations = expandSegmentConfigurations(v.([]interface{})) + } + + if _, err := conn.UpdateKxDataview(ctx, in); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionUpdating, ResNameKxDataview, d.Get("name").(string), err)...) + } + + if _, err := waitKxDataviewUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForUpdate, ResNameKxDataview, d.Get("name").(string), err)...) + } + + return append(diags, resourceKxDataviewRead(ctx, d, meta)...) 
+} + +func resourceKxDataviewDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) + + _, err := conn.DeleteKxDataview(ctx, &finspace.DeleteKxDataviewInput{ + EnvironmentId: aws.String(d.Get("environment_id").(string)), + DatabaseName: aws.String(d.Get("database_name").(string)), + DataviewName: aws.String(d.Get("name").(string)), + ClientToken: aws.String(id.UniqueId()), + }) + + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return diags + } + return append(diags, create.DiagError(names.FinSpace, create.ErrActionDeleting, ResNameKxDataview, d.Get("name").(string), err)...) + } + + if _, err := waitKxDataviewDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil && !tfresource.NotFound(err) { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForDeletion, ResNameKxDataview, d.Id(), err)...) 
+ } + return diags +} + +func findKxDataviewById(ctx context.Context, conn *finspace.Client, id string) (*finspace.GetKxDataviewOutput, error) { + idParts, err := flex.ExpandResourceId(id, kxDataviewIdPartCount, false) + if err != nil { + return nil, err + } + + in := &finspace.GetKxDataviewInput{ + EnvironmentId: aws.String(idParts[0]), + DatabaseName: aws.String(idParts[1]), + DataviewName: aws.String(idParts[2]), + } + + out, err := conn.GetKxDataview(ctx, in) + if err != nil { + var nfe *types.ResourceNotFoundException + + if errors.As(err, &nfe) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: in, + } + + } + return nil, err + } + + if out == nil || out.DataviewName == nil { + return nil, tfresource.NewEmptyResultError(in) + } + return out, nil +} + +func waitKxDataviewCreated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxDataviewOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxDataviewStatusCreating), + Target: enum.Slice(types.KxDataviewStatusActive), + Refresh: statusKxDataview(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxDataviewOutput); ok { + return out, err + } + return nil, err +} + +func waitKxDataviewUpdated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxDataviewOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxDataviewStatusUpdating), + Target: enum.Slice(types.KxDataviewStatusActive), + Refresh: statusKxDataview(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if out, ok := outputRaw.(*finspace.GetKxDataviewOutput); ok { + return out, err + } + return nil, err +} + +func waitKxDataviewDeleted(ctx context.Context, 
conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxDataviewOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxDataviewStatusDeleting), + Target: []string{}, + Refresh: statusKxDataview(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxDataviewOutput); ok { + return out, err + } + + return nil, err +} + +func statusKxDataview(ctx context.Context, conn *finspace.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + out, err := findKxDataviewById(ctx, conn, id) + if tfresource.NotFound(err) { + return nil, "", nil + } + if err != nil { + return nil, "", err + } + return out, string(out.Status), nil + } +} + +func expandDbPath(tfList []interface{}) []string { + if tfList == nil { + return nil + } + var s []string + + for _, v := range tfList { + s = append(s, v.(string)) + } + return s +} + +func expandSegmentConfigurations(tfList []interface{}) []types.KxDataviewSegmentConfiguration { + if tfList == nil { + return nil + } + var s []types.KxDataviewSegmentConfiguration + + for _, v := range tfList { + m := v.(map[string]interface{}) + s = append(s, types.KxDataviewSegmentConfiguration{ + VolumeName: aws.String(m["volume_name"].(string)), + DbPaths: expandDbPath(m["db_paths"].([]interface{})), + }) + } + + return s +} +func flattenSegmentConfiguration(apiObject *types.KxDataviewSegmentConfiguration) map[string]interface{} { + if apiObject == nil { + return nil + } + m := map[string]interface{}{} + if v := apiObject.VolumeName; aws.ToString(v) != "" { + m["volume_name"] = aws.ToString(v) + } + if v := apiObject.DbPaths; v != nil { + m["db_paths"] = v + } + return m +} + +func flattenSegmentConfigurations(apiObjects []types.KxDataviewSegmentConfiguration) []interface{} { + if apiObjects == nil { + return nil + } + var l []interface{} + for _, apiObject := range apiObjects { + l = append(l, 
flattenSegmentConfiguration(&apiObject)) + } + return l +} diff --git a/internal/service/finspace/kx_dataview_test.go b/internal/service/finspace/kx_dataview_test.go new file mode 100644 index 00000000000..0a26282997d --- /dev/null +++ b/internal/service/finspace/kx_dataview_test.go @@ -0,0 +1,241 @@ +package finspace_test + +import ( + "context" + "errors" + "fmt" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/finspace" + "github.com/aws/aws-sdk-go-v2/service/finspace/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tffinspace "github.com/hashicorp/terraform-provider-aws/internal/service/finspace" + "github.com/hashicorp/terraform-provider-aws/names" + "testing" +) + +func TestAccFinSpaceKxDataview_basic(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + ctx := acctest.Context(t) + var kxdataview finspace.GetKxDataviewOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_dataview.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxDataviewDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxDataviewConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxDataviewExists(ctx, resourceName, &kxdataview), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxDataviewStatusActive)), + ), + }, + 
{ + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} +func TestAccFinSpaceKxDataview_disappears(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + ctx := acctest.Context(t) + var kxdataview finspace.GetKxDataviewOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_dataview.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxDataviewDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxDataviewConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxDataviewExists(ctx, resourceName, &kxdataview), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tffinspace.ResourceKxDataview(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccKxDataviewConfigBase(rName string) string { + return fmt.Sprintf(` +resource "aws_kms_key" "test" { + deletion_window_in_days = 7 +} + +resource "aws_finspace_kx_environment" "test" { + name = %[1]q + kms_key_id = aws_kms_key.test.arn +} +resource "aws_finspace_kx_database" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id +} +`, rName) +} +func testAccKxDataviewConfig_basic(rName string) string { + return acctest.ConfigCompose( + testAccKxDataviewConfigBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_dataview" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + database_name = aws_finspace_kx_database.test.name + auto_update = true + az_mode = "SINGLE" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] +} +`, rName)) +} + +func testAccCheckKxDataviewExists(ctx context.Context, name string, 
kxdataview *finspace.GetKxDataviewOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxDataview, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxDataview, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + resp, err := conn.GetKxDataview(ctx, &finspace.GetKxDataviewInput{ + DatabaseName: aws.String(rs.Primary.Attributes["database_name"]), + EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), + DataviewName: aws.String(rs.Primary.Attributes["name"]), + }) + if err != nil { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxDataview, rs.Primary.ID, err) + } + + *kxdataview = *resp + + return nil + } +} + +func testAccCheckKxDataviewDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_finspace_kx_dataview" { + continue + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + _, err := conn.GetKxDataview(ctx, &finspace.GetKxDataviewInput{ + DatabaseName: aws.String(rs.Primary.Attributes["database_name"]), + EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), + DataviewName: aws.String(rs.Primary.Attributes["name"]), + }) + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return nil + } + return err + } + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxDataview, rs.Primary.ID, err) + } + return nil + } +} + +func testAccKxDataviewVolumeBase(rName string) string { + return fmt.Sprintf(` +resource "aws_finspace_kx_volume" "test" { + name = %[1]q + 
environment_id = aws_finspace_kx_environment.test.id + availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type= "SSD_250" + size= 1200 + } +} +`, rName) +} + +func testAccKxDataviewConfig_withKxVolume(rName string) string { + return acctest.ConfigCompose( + testAccKxDataviewConfigBase(rName), + testAccKxDataviewVolumeBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_dataview" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + database_name = aws_finspace_kx_database.test.name + auto_update = true + az_mode = "SINGLE" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + + segment_configurations { + db_paths = ["/*"] + volume_name = aws_finspace_kx_volume.test.name + } +} +`, rName)) +} + +func TestAccFinSpaceKxDataview_withKxVolume(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + ctx := acctest.Context(t) + + var kxdataview finspace.GetKxDataviewOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_dataview.test" + + resource.ParallelTest(t, resource.TestCase{ + + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + + CheckDestroy: testAccCheckKxDataviewDestroy(ctx), + + Steps: []resource.TestStep{ + { + Config: testAccKxDataviewConfig_withKxVolume(rName), + + Check: resource.ComposeTestCheckFunc( + testAccCheckKxDataviewExists(ctx, resourceName, &kxdataview), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxDataviewStatusActive)), + ), + }, + }, + }) +} diff --git a/internal/service/finspace/service_package_gen.go 
b/internal/service/finspace/service_package_gen.go
index 42b687b450e..b34d275706c 100644
--- a/internal/service/finspace/service_package_gen.go
+++ b/internal/service/finspace/service_package_gen.go
@@ -60,6 +60,14 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka
 				IdentifierAttribute: "arn",
 			},
 		},
+		{
+			Factory:  ResourceKxDataview,
+			TypeName: "aws_finspace_kx_dataview",
+			Name:     "Kx Dataview",
+			Tags: &types.ServicePackageResourceTags{
+				IdentifierAttribute: "arn",
+			},
+		},
 	}
 }
diff --git a/website/docs/r/finspace_kx_dataview.html.markdown b/website/docs/r/finspace_kx_dataview.html.markdown
new file mode 100644
index 00000000000..70a2b762c90
--- /dev/null
+++ b/website/docs/r/finspace_kx_dataview.html.markdown
@@ -0,0 +1,91 @@
+---
+subcategory: "FinSpace"
+layout: "aws"
+page_title: "AWS: aws_finspace_kx_dataview"
+description: |-
+  Terraform resource for managing an AWS FinSpace Kx Dataview.
+---
+
+# Resource: aws_finspace_kx_dataview
+
+Terraform resource for managing an AWS FinSpace Kx Dataview.
+
+## Example Usage
+
+### Basic Usage
+
+```terraform
+resource "aws_finspace_kx_dataview" "example" {
+  name                 = "my-tf-kx-dataview"
+  environment_id       = aws_finspace_kx_environment.example.id
+  database_name        = aws_finspace_kx_database.example.name
+  availability_zone_id = "use1-az2"
+  description          = "Terraform managed Kx Dataview"
+  az_mode              = "SINGLE"
+  auto_update          = true
+
+  segment_configurations {
+    volume_name = aws_finspace_kx_volume.example.name
+    db_paths    = ["/*"]
+  }
+}
+```
+
+## Argument Reference
+
+The following arguments are required:
+
+* `az_mode` - (Required) The number of availability zones you want to assign per cluster. This can be one of the following:
+    * SINGLE - Assigns one availability zone per cluster.
+    * MULTI - Assigns all the availability zones per cluster.
+* `database_name` - (Required) The name of the database where you want to create a dataview.
+* `environment_id` - (Required) Unique identifier for the KX environment. +* `name` - (Required) A unique identifier for the dataview. + +The following arguments are optional: +* `auto_update` - (Optional) The option to specify whether you want to apply all the future additions and corrections automatically to the dataview, when you ingest new changesets. The default value is false. +* `availability_zone_id` - (Optional) The identifier of the availability zones. If attaching a volume, the volume must be in the same availability zone as the dataview that you are attaching to. +* `changeset_id` - (Optional) A unique identifier of the changeset of the database that you want to use to ingest data. +* `description` - (Optional) A description for the dataview. +* `segment_configurations` - (Optional) The configuration that contains the database path of the data that you want to place on each selected volume. Each segment must have a unique database path for each volume. If you do not explicitly specify any database path for a volume, they are accessible from the cluster through the default S3/object store segment. See [segment_configurations](#segment_configurations). +* `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. + +### segment_configurations +* `db_paths` - (Required) The database path of the data that you want to place on each selected volume. Each segment must have a unique database path for each volume. +* `volume_name` - (Required) The name of the volume that you want to attach to a dataview. This volume must be in the same availability zone as the dataview that you are attaching to. 
+
+
+## Attribute Reference
+
+This resource exports the following attributes in addition to the arguments above:
+
+* `arn` - Amazon Resource Name (ARN) identifier of the KX dataview.
+* `created_timestamp` - Timestamp at which the dataview was created in FinSpace. Value determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
+* `id` - A comma-delimited string joining environment ID, database name and dataview name.
+* `last_modified_timestamp` - The last time that the dataview was updated in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000.
+* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block).
+
+## Timeouts
+
+[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts):
+
+* `create` - (Default `30m`)
+* `update` - (Default `30m`)
+* `delete` - (Default `30m`)
+
+## Import
+
+In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import an AWS FinSpace Kx Dataview using the `id` (environment ID, database name and dataview name, comma-delimited). For example:
+
+```terraform
+import {
+  to = aws_finspace_kx_dataview.example
+  id = "n3ceo7wqxoxcti5tujqwzs,my-tf-kx-database,my-tf-kx-dataview"
+}
+```
+
+Using `terraform import`, import an AWS FinSpace Kx Dataview using the `id` (environment ID, database name and dataview name, comma-delimited).
For example: + +```console +% terraform import aws_finspace_kx_dataview.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-database,my-tf-kx-dataview +``` From 3a043cadd6bebdd6cc5488b43dabce5404e0a2b2 Mon Sep 17 00:00:00 2001 From: Mayank Hirani Date: Fri, 8 Dec 2023 12:37:46 -0800 Subject: [PATCH 059/438] Push all kx volume changes for PR. --- internal/service/finspace/kx_volume.go | 504 ++++++++++++++++++ internal/service/finspace/kx_volume_test.go | 278 ++++++++++ .../service/finspace/service_package_gen.go | 8 + .../docs/r/finspace_kx_volume.html.markdown | 98 ++++ 4 files changed, 888 insertions(+) create mode 100644 internal/service/finspace/kx_volume.go create mode 100644 internal/service/finspace/kx_volume_test.go create mode 100644 website/docs/r/finspace_kx_volume.html.markdown diff --git a/internal/service/finspace/kx_volume.go b/internal/service/finspace/kx_volume.go new file mode 100644 index 00000000000..0edbc919ffc --- /dev/null +++ b/internal/service/finspace/kx_volume.go @@ -0,0 +1,504 @@ +// // Copyright (c) HashiCorp, Inc. 
+// // SPDX-License-Identifier: MPL-2.0 +package finspace + +import ( + "context" + "errors" + "log" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/finspace" + "github.com/aws/aws-sdk-go-v2/service/finspace/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @SDKResource("aws_finspace_kx_volume", name="Kx Volume") +// @Tags(identifierAttribute="arn") +func ResourceKxVolume() *schema.Resource { + return &schema.Resource{ + CreateWithoutTimeout: resourceKxVolumeCreate, + ReadWithoutTimeout: resourceKxVolumeRead, + UpdateWithoutTimeout: resourceKxVolumeUpdate, + DeleteWithoutTimeout: resourceKxVolumeDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(45 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + + "availability_zones": { + Type: schema.TypeList, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Required: true, + ForceNew: true, + }, + "arn": { + Type: 
schema.TypeString, + Computed: true, + }, + "az_mode": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxAzMode](), + }, + "environment_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 32), + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxVolumeType](), + }, + "description": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 1000), + }, + "nas1_configuration": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "size": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntBetween(1200, 33600), + }, + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxNAS1Type](), + }, + }, + }, + }, + "created_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "last_modified_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "status_reason": { + Type: schema.TypeString, + Computed: true, + }, + "attached_clusters": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "cluster_status": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxClusterStatus](), + }, + "cluster_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxClusterType](), + }, 
+ }, + }, + Computed: true, + }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), + }, + CustomizeDiff: verify.SetTagsDiff, + } +} + +const ( + ResNameKxVolume = "Kx Volume" + kxVolumeIDPartCount = 2 +) + +func resourceKxVolumeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + environmentId := d.Get("environment_id").(string) + volumeName := d.Get("name").(string) + idParts := []string{ + environmentId, + volumeName, + } + rID, err := flex.FlattenResourceId(idParts, kxVolumeIDPartCount, false) + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionFlatteningResourceId, ResNameKxVolume, d.Get("name").(string), err)...) + } + d.SetId(rID) + + in := &finspace.CreateKxVolumeInput{ + ClientToken: aws.String(id.UniqueId()), + AvailabilityZoneIds: flex.ExpandStringValueList(d.Get("availability_zones").([]interface{})), + EnvironmentId: aws.String(environmentId), + VolumeType: types.KxVolumeType(d.Get("type").(string)), + VolumeName: aws.String(volumeName), + AzMode: types.KxAzMode(d.Get("az_mode").(string)), + Tags: getTagsIn(ctx), + } + + if v, ok := d.GetOk("description"); ok { + in.Description = aws.String(v.(string)) + } + + if v, ok := d.GetOk("nas1_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + in.Nas1Configuration = expandNas1Configuration(v.([]interface{})) + } + + // TODO: add flatten/expand functions for remaining parameters + + out, err := conn.CreateKxVolume(ctx, in) + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Get("name").(string), err)...) 
+ } + + if out == nil || out.VolumeName == nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Get("name").(string), errors.New("empty output"))...) + } + + if _, err := waitKxVolumeCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxVolume, d.Id(), err)...) + } + + // The CreateKxVolume API currently fails to tag the Volume when the + // Tags field is set. Until the API is fixed, tag after creation instead. + if err := createTags(ctx, conn, aws.ToString(out.VolumeArn), getTagsIn(ctx)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Id(), err)...) + } + + return append(diags, resourceKxVolumeRead(ctx, d, meta)...) +} + +func resourceKxVolumeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + out, err := findKxVolumeByID(ctx, conn, d.Id()) + + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] FinSpace KxVolume (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags + } + + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionReading, ResNameKxVolume, d.Id(), err)...) 
+ } + + d.Set("arn", out.VolumeArn) + d.Set("name", out.VolumeName) + d.Set("description", out.Description) + d.Set("type", out.VolumeType) + d.Set("status", out.Status) + d.Set("status_reason", out.StatusReason) + d.Set("az_mode", out.AzMode) + d.Set("description", out.Description) + d.Set("created_timestamp", out.CreatedTimestamp.String()) + d.Set("last_modified_timestamp", out.LastModifiedTimestamp.String()) + d.Set("availability_zones", aws.StringSlice(out.AvailabilityZoneIds)) + + if err := d.Set("nas1_configuration", flattenNas1Configuration(out.Nas1Configuration)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err)...) + } + + if err := d.Set("attached_clusters", flattenAttachedClusters(out.AttachedClusters)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err)...) + } + + parts, err := flex.ExpandResourceId(d.Id(), kxVolumeIDPartCount, false) + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err)...) 
+ } + d.Set("environment_id", parts[0]) + + return diags +} + +func resourceKxVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + updateVolume := false + + in := &finspace.UpdateKxVolumeInput{ + EnvironmentId: aws.String(d.Get("environment_id").(string)), + VolumeName: aws.String(d.Get("name").(string)), + } + + if v, ok := d.GetOk("description"); ok && d.HasChanges("description") { + in.Description = aws.String(v.(string)) + updateVolume = true + } + + if v, ok := d.GetOk("nas1_configuration"); ok && len(v.([]interface{})) > 0 && d.HasChanges("nas1_configuration") { + in.Nas1Configuration = expandNas1Configuration(v.([]interface{})) + updateVolume = true + } + + if !updateVolume { + return diags + } + + log.Printf("[DEBUG] Updating FinSpace KxVolume (%s): %#v", d.Id(), in) + + if _, err := conn.UpdateKxVolume(ctx, in); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionUpdating, ResNameKxVolume, d.Id(), err)...) + } + if _, err := waitKxVolumeUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionUpdating, ResNameKxVolume, d.Id(), err)...) + } + + return append(diags, resourceKxVolumeRead(ctx, d, meta)...) 
+} + +func resourceKxVolumeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + log.Printf("[INFO] Deleting FinSpace Kx Volume: %s", d.Id()) + _, err := conn.DeleteKxVolume(ctx, &finspace.DeleteKxVolumeInput{ + VolumeName: aws.String(d.Get("name").(string)), + EnvironmentId: aws.String(d.Get("environment_id").(string)), + }) + + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return diags + } + + return append(diags, create.DiagError(names.FinSpace, create.ErrActionDeleting, ResNameKxVolume, d.Id(), err)...) + } + + _, err = waitKxVolumeDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)) + if err != nil && !tfresource.NotFound(err) { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForDeletion, ResNameKxVolume, d.Id(), err)...) + } + + return diags +} + +func waitKxVolumeCreated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxVolumeOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxVolumeStatusCreating), + Target: enum.Slice(types.KxVolumeStatusActive), + Refresh: statusKxVolume(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxVolumeOutput); ok { + return out, err + } + + return nil, err +} + +func waitKxVolumeUpdated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxVolumeOutput, error) { //nolint:unparam + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxVolumeStatusCreating, types.KxVolumeStatusUpdating), + Target: enum.Slice(types.KxVolumeStatusActive), + Refresh: statusKxVolume(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + 
ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxVolumeOutput); ok { + return out, err + } + + return nil, err +} + +func waitKxVolumeDeleted(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxVolumeOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxVolumeStatusDeleting), + Target: enum.Slice(types.KxVolumeStatusDeleted), + Refresh: statusKxVolume(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxVolumeOutput); ok { + return out, err + } + + return nil, err +} + +func statusKxVolume(ctx context.Context, conn *finspace.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + out, err := findKxVolumeByID(ctx, conn, id) + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return out, string(out.Status), nil + } +} + +func findKxVolumeByID(ctx context.Context, conn *finspace.Client, id string) (*finspace.GetKxVolumeOutput, error) { + parts, err := flex.ExpandResourceId(id, kxVolumeIDPartCount, false) + if err != nil { + return nil, err + } + + in := &finspace.GetKxVolumeInput{ + EnvironmentId: aws.String(parts[0]), + VolumeName: aws.String(parts[1]), + } + + out, err := conn.GetKxVolume(ctx, in) + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: in, + } + } + + return nil, err + } + + if out == nil || out.VolumeArn == nil { + return nil, tfresource.NewEmptyResultError(in) + } + + return out, nil +} + +func expandNas1Configuration(tfList []interface{}) *types.KxNAS1Configuration { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]interface{}) + + a := &types.KxNAS1Configuration{} + + 
if v, ok := tfMap["size"].(int); ok && v != 0 { + a.Size = aws.Int32(int32(v)) + } + + if v, ok := tfMap["type"].(string); ok && v != "" { + a.Type = types.KxNAS1Type(v) + } + return a +} + +func flattenNas1Configuration(apiObject *types.KxNAS1Configuration) []interface{} { + if apiObject == nil { + return nil + } + + m := map[string]interface{}{} + + if v := apiObject.Size; v != nil { + m["size"] = aws.ToInt32(v) + } + + if v := apiObject.Type; v != "" { + m["type"] = v + } + + return []interface{}{m} +} + +func flattenCluster(apiObject *types.KxAttachedCluster) map[string]interface{} { + if apiObject == nil { + return nil + } + + m := map[string]interface{}{} + + if v := apiObject.ClusterName; aws.ToString(v) != "" { + m["cluster_name"] = aws.ToString(v) + } + + if v := apiObject.ClusterStatus; v != "" { + m["cluster_status"] = string(v) + } + + if v := apiObject.ClusterType; v != "" { + m["cluster_type"] = string(v) + } + + return m +} + +func flattenAttachedClusters(apiObjects []types.KxAttachedCluster) []interface{} { + if len(apiObjects) == 0 { + return nil + } + + var l []interface{} + + for _, apiObject := range apiObjects { + l = append(l, flattenCluster(&apiObject)) + } + + return l +} diff --git a/internal/service/finspace/kx_volume_test.go b/internal/service/finspace/kx_volume_test.go new file mode 100644 index 00000000000..520c918c1d0 --- /dev/null +++ b/internal/service/finspace/kx_volume_test.go @@ -0,0 +1,278 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package finspace_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/finspace" + "github.com/aws/aws-sdk-go-v2/service/finspace/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tffinspace "github.com/hashicorp/terraform-provider-aws/internal/service/finspace" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccFinSpaceKxVolume_basic(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var KxVolume finspace.GetKxVolumeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_volume.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxVolumeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxVolumeConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxVolumeExists(ctx, resourceName, &KxVolume), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxVolumeStatusActive)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccFinSpaceKxVolume_dissappears(t *testing.T) { + if testing.Short() { + t.Skip("skipping 
long-running test in short mode") + } + + ctx := acctest.Context(t) + var KxVolume finspace.GetKxVolumeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_volume.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxVolumeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxVolumeConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxVolumeExists(ctx, resourceName, &KxVolume), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tffinspace.ResourceKxVolume(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckKxVolumeDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_finspace_kx_volume" { + continue + } + + input := &finspace.GetKxVolumeInput{ + VolumeName: aws.String(rs.Primary.Attributes["name"]), + EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), + } + _, err := conn.GetKxVolume(ctx, input) + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return nil + } + return err + } + + return create.Error(names.FinSpace, create.ErrActionCheckingDestroyed, tffinspace.ResNameKxVolume, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func testAccKxVolumeConfig_basic(rName string) string { + return acctest.ConfigCompose( + testAccKxVolumeConfigBase(rName), + fmt.Sprintf(` + resource "aws_finspace_kx_volume" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zones = 
[aws_finspace_kx_environment.test.availability_zones[0]] + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type= "SSD_250" + size= 1200 + } + } + `, rName)) +} + +func testAccKxVolumeConfigBase(rName string) string { + return fmt.Sprintf(` + data "aws_caller_identity" "current" {} + data "aws_partition" "current" {} + + output "account_id" { + value = data.aws_caller_identity.current.account_id + } + + resource "aws_kms_key" "test" { + deletion_window_in_days = 7 + } + + resource "aws_finspace_kx_environment" "test" { + name = %[1]q + kms_key_id = aws_kms_key.test.arn + } + + data "aws_iam_policy_document" "key_policy" { + statement { + actions = [ + "kms:Decrypt", + "kms:GenerateDataKey" + ] + + resources = [ + aws_kms_key.test.arn, + ] + + principals { + type = "Service" + identifiers = ["finspace.amazonaws.com"] + } + + condition { + test = "ArnLike" + variable = "aws:SourceArn" + values = ["${aws_finspace_kx_environment.test.arn}/*"] + } + + condition { + test = "StringEquals" + variable = "aws:SourceAccount" + values = [data.aws_caller_identity.current.account_id] + } + } + + statement { + actions = [ + "kms:*", + ] + + resources = [ + "*", + ] + + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] + } + } + } + + resource "aws_kms_key_policy" "test" { + key_id = aws_kms_key.test.id + policy = data.aws_iam_policy_document.key_policy.json + } + + resource "aws_vpc" "test" { + cidr_block = "172.31.0.0/16" + enable_dns_hostnames = true + } + + resource "aws_subnet" "test" { + vpc_id = aws_vpc.test.id + cidr_block = "172.31.32.0/20" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + } + + resource "aws_security_group" "test" { + name = %[1]q + vpc_id = aws_vpc.test.id + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + 
cidr_blocks = ["0.0.0.0/0"] + } + } + + resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id + } + + data "aws_route_tables" "rts" { + vpc_id = aws_vpc.test.id + } + + resource "aws_route" "r" { + route_table_id = tolist(data.aws_route_tables.rts.ids)[0] + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id + } + `, rName) +} + +func testAccCheckKxVolumeExists(ctx context.Context, name string, KxVolume *finspace.GetKxVolumeOutput) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + resp, err := conn.GetKxVolume(ctx, &finspace.GetKxVolumeInput{ + VolumeName: aws.String(rs.Primary.Attributes["name"]), + EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), + }) + + if err != nil { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, rs.Primary.ID, err) + } + + *KxVolume = *resp + + return nil + } +} diff --git a/internal/service/finspace/service_package_gen.go b/internal/service/finspace/service_package_gen.go index 42b687b450e..9fb0005d844 100644 --- a/internal/service/finspace/service_package_gen.go +++ b/internal/service/finspace/service_package_gen.go @@ -60,6 +60,14 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka IdentifierAttribute: "arn", }, }, + { + Factory: ResourceKxVolume, + TypeName: "aws_finspace_kx_volume", + Name: "Kx Volume", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: "arn", + }, + }, } } diff --git a/website/docs/r/finspace_kx_volume.html.markdown 
b/website/docs/r/finspace_kx_volume.html.markdown new file mode 100644 index 00000000000..71e855f1bc4 --- /dev/null +++ b/website/docs/r/finspace_kx_volume.html.markdown @@ -0,0 +1,98 @@ +--- +subcategory: "FinSpace" +layout: "aws" +page_title: "AWS: aws_finspace_kx_volume" +description: |- + Terraform resource for managing an AWS FinSpace Kx Volume. +--- + +# Resource: aws_finspace_kx_volume + +Terraform resource for managing an AWS FinSpace Kx Volume. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_finspace_kx_volume" "example" { + name = "my-tf-kx-volume" + environment_id = aws_finspace_kx_environment.example.id + availability_zones = ["use1-az2"] + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type= "SSD_250" + size= 1200 + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `az_mode` - (Required) The number of availability zones you want to assign per volume. Currently, FinSpace only supports SINGLE for volumes. + * SINGLE - Assigns one availability zone per volume. +* `environment_id` - (Required) A unique identifier for the kdb environment, whose clusters can attach to the volume. +* `name` - (Required) Unique name for the volume that you want to create. +* `type` - (Required) The type of file system volume. Currently, FinSpace only supports NAS_1 volume type. When you select NAS_1 volume type, you must also provide nas1Configuration. +* `availability_zones` - (Required) A list of AWS Availability Zone IDs in which the volume is created. + +The following arguments are optional: + +* `nas1_configuration` - (Optional) Specifies the configuration for the Network attached storage (NAS_1) file system volume. This parameter is required when you choose volumeType as NAS_1. +* `description` - (Optional) Description of the volume. +* `tags` - (Optional) A list of key-value pairs to label the volume. 
You can add up to 50 tags to a volume. + + +### nas1_configuration + +The nas1_configuration block supports the following arguments: + +* `size` - (Required) The size of the network attached storage. +* `type` - (Required) The type of the network attached storage. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - Amazon Resource Name (ARN) identifier of the KX volume. +* `created_timestamp` - The timestamp at which the volume was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. +* `status` - The status of volume creation. + * CREATING – The volume creation is in progress. + * CREATE_FAILED – The volume creation has failed. + * ACTIVE – The volume is active. + * UPDATING – The volume is in the process of being updated. + * UPDATE_FAILED – The update action failed. + * UPDATED – The volume is successfully updated. + * DELETING – The volume is in the process of being deleted. + * DELETE_FAILED – The system failed to delete the volume. + * DELETED – The volume is successfully deleted. +* `status_reason` - The error message when a failed state occurs. +* `last_modified_timestamp` - Last timestamp at which the volume was updated in FinSpace. Value determined as epoch time in seconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000. + +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `30m`) +* `update` - (Default `30m`) +* `delete` - (Default `45m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import an AWS FinSpace Kx Volume using the `id` (environment ID and volume name, comma-delimited). 
For example: + +```terraform +import { + to = aws_finspace_kx_volume.example + id = "n3ceo7wqxoxcti5tujqwzs,my-tf-kx-volume" +} +``` + +Using `terraform import`, import an AWS FinSpace Kx Volume using the `id` (environment ID and volume name, comma-delimited). For example: + +```console +% terraform import aws_finspace_kx_volume.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-volume +``` From c1f5965ebebadffc3eef1df7383210b01d04c0c8 Mon Sep 17 00:00:00 2001 From: Mayank Hirani Date: Fri, 8 Dec 2023 12:48:09 -0800 Subject: [PATCH 060/438] Add scaling group for hashicorp PR. --- internal/service/finspace/kx_scaling_group.go | 292 ++++++++++++++++++ .../service/finspace/kx_scaling_group_test.go | 273 ++++++++++++++++ .../service/finspace/service_package_gen.go | 16 + .../r/finspace_kx_scaling_group.html.markdown | 81 +++++ 4 files changed, 662 insertions(+) create mode 100644 internal/service/finspace/kx_scaling_group.go create mode 100644 internal/service/finspace/kx_scaling_group_test.go create mode 100644 website/docs/r/finspace_kx_scaling_group.html.markdown diff --git a/internal/service/finspace/kx_scaling_group.go b/internal/service/finspace/kx_scaling_group.go new file mode 100644 index 00000000000..db5b8e9713c --- /dev/null +++ b/internal/service/finspace/kx_scaling_group.go @@ -0,0 +1,292 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package finspace + +import ( + "context" + "errors" + "log" + "time" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/finspace" + "github.com/aws/aws-sdk-go-v2/service/finspace/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @SDKResource("aws_finspace_kx_scaling_group", name="Kx Scaling Group") +// @Tags(identifierAttribute="arn") +func ResourceKxScalingGroup() *schema.Resource { + return &schema.Resource{ + CreateWithoutTimeout: resourceKxScalingGroupCreate, + ReadWithoutTimeout: resourceKxScalingGroupRead, + UpdateWithoutTimeout: resourceKxScalingGroupUpdate, + DeleteWithoutTimeout: resourceKxScalingGroupDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(45 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(60 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "availability_zone_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "environment_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + 
ValidateFunc: validation.StringLenBetween(1, 32), + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "host_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(1, 32), + }, + "created_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "last_modified_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "clusters": { + Type: schema.TypeList, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Computed: true, + }, + "status": { + Type: schema.TypeString, + Computed: true, + }, + "status_reason": { + Type: schema.TypeString, + Computed: true, + }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), + }, + CustomizeDiff: verify.SetTagsDiff, + } +} + +const ( + ResNameKxScalingGroup = "Kx Scaling Group" + kxScalingGroupIDPartCount = 2 +) + +func resourceKxScalingGroupCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) + + environmentId := d.Get("environment_id").(string) + scalingGroupName := d.Get("name").(string) + idParts := []string{ + environmentId, + scalingGroupName, + } + rID, err := flex.FlattenResourceId(idParts, kxScalingGroupIDPartCount, false) + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionFlatteningResourceId, ResNameKxScalingGroup, d.Get("name").(string), err)...) 
+ } + d.SetId(rID) + + in := &finspace.CreateKxScalingGroupInput{ + EnvironmentId: aws.String(environmentId), + ScalingGroupName: aws.String(scalingGroupName), + HostType: aws.String(d.Get("host_type").(string)), + AvailabilityZoneId: aws.String(d.Get("availability_zone_id").(string)), + Tags: getTagsIn(ctx), + } + + out, err := conn.CreateKxScalingGroup(ctx, in) + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxScalingGroup, d.Get("name").(string), err)...) + } + + if out == nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxScalingGroup, d.Get("name").(string), errors.New("empty output"))...) + } + + if _, err := waitKxScalingGroupCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxScalingGroup, d.Id(), err)...) + } + + return append(diags, resourceKxScalingGroupRead(ctx, d, meta)...) 
+} + +func waitKxScalingGroupCreated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxScalingGroupOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxScalingGroupStatusCreating), + Target: enum.Slice(types.KxScalingGroupStatusActive), + Refresh: statusKxScalingGroup(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxScalingGroupOutput); ok { + return out, err + } + + return nil, err +} + +func statusKxScalingGroup(ctx context.Context, conn *finspace.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + out, err := findKxScalingGroupById(ctx, conn, id) + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return out, string(out.Status), nil + } +} + +func resourceKxScalingGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) + + out, err := findKxScalingGroupById(ctx, conn, d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] FinSpace KxScalingGroup (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags + } + + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionReading, ResNameKxScalingGroup, d.Id(), err)...) 
+ } + d.Set("arn", out.ScalingGroupArn) + d.Set("status", out.Status) + d.Set("status_reason", out.StatusReason) + d.Set("created_timestamp", out.CreatedTimestamp.String()) + d.Set("last_modified_timestamp", out.LastModifiedTimestamp.String()) + d.Set("name", out.ScalingGroupName) + d.Set("availability_zone_id", out.AvailabilityZoneId) + d.Set("host_type", out.HostType) + d.Set("clusters", out.Clusters) + + parts, err := flex.ExpandResourceId(d.Id(), kxUserIDPartCount, false) + if err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxScalingGroup, d.Id(), err)...) + } + d.Set("environment_id", parts[0]) + + return diags +} + +func resourceKxScalingGroupUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + // Tags only. + return append(diags, resourceKxScalingGroupRead(ctx, d, meta)...) +} + +func resourceKxScalingGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) + + log.Printf("[INFO] Deleting FinSpace KxScalingGroup %s", d.Id()) + _, err := conn.DeleteKxScalingGroup(ctx, &finspace.DeleteKxScalingGroupInput{ + ScalingGroupName: aws.String(d.Get("name").(string)), + EnvironmentId: aws.String(d.Get("environment_id").(string)), + }) + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return diags + } + + return append(diags, create.DiagError(names.FinSpace, create.ErrActionDeleting, ResNameKxScalingGroup, d.Id(), err)...) + } + + _, err = waitKxScalingGroupDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)) + if err != nil && !tfresource.NotFound(err) { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForDeletion, ResNameKxScalingGroup, d.Id(), err)...) 
+ } + + return diags +} + +func findKxScalingGroupById(ctx context.Context, conn *finspace.Client, id string) (*finspace.GetKxScalingGroupOutput, error) { + parts, err := flex.ExpandResourceId(id, kxScalingGroupIDPartCount, false) + if err != nil { + return nil, err + } + in := &finspace.GetKxScalingGroupInput{ + EnvironmentId: aws.String(parts[0]), + ScalingGroupName: aws.String(parts[1]), + } + + out, err := conn.GetKxScalingGroup(ctx, in) + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: in, + } + } + + return nil, err + } + + if out == nil || out.ScalingGroupName == nil { + return nil, tfresource.NewEmptyResultError(in) + } + return out, nil +} + +func waitKxScalingGroupDeleted(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxScalingGroupOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxScalingGroupStatusDeleting), + Target: enum.Slice(types.KxScalingGroupStatusDeleted), + Refresh: statusKxScalingGroup(ctx, conn, id), + Timeout: timeout, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxScalingGroupOutput); ok { + return out, err + } + + return nil, err +} diff --git a/internal/service/finspace/kx_scaling_group_test.go b/internal/service/finspace/kx_scaling_group_test.go new file mode 100644 index 00000000000..3f5b3714a94 --- /dev/null +++ b/internal/service/finspace/kx_scaling_group_test.go @@ -0,0 +1,273 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package finspace_test + +import ( + "context" + "errors" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/finspace" + "github.com/aws/aws-sdk-go-v2/service/finspace/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" + tffinspace "github.com/hashicorp/terraform-provider-aws/internal/service/finspace" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccFinSpaceKxScalingGroup_basic(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var KxScalingGroup finspace.GetKxScalingGroupOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_scaling_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxScalingGroupDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxScalingGroupConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxScalingGroupExists(ctx, resourceName, &KxScalingGroup), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxScalingGroupStatusActive)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccFinSpaceKxScalingGroup_dissappears(t 
*testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var KxScalingGroup finspace.GetKxScalingGroupOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_scaling_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxScalingGroupDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxScalingGroupConfig_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxScalingGroupExists(ctx, resourceName, &KxScalingGroup), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tffinspace.ResourceKxScalingGroup(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func testAccCheckKxScalingGroupDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_finspace_kx_scaling_group" { + continue + } + + input := &finspace.GetKxScalingGroupInput{ + ScalingGroupName: aws.String(rs.Primary.Attributes["name"]), + EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), + } + _, err := conn.GetKxScalingGroup(ctx, input) + if err != nil { + var nfe *types.ResourceNotFoundException + if errors.As(err, &nfe) { + return nil + } + return err + } + + return create.Error(names.FinSpace, create.ErrActionCheckingDestroyed, tffinspace.ResNameKxScalingGroup, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} + +func testAccCheckKxScalingGroupExists(ctx context.Context, name string, KxScalingGroup *finspace.GetKxScalingGroupOutput) resource.TestCheckFunc { + 
return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxScalingGroup, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxScalingGroup, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + resp, err := conn.GetKxScalingGroup(ctx, &finspace.GetKxScalingGroupInput{ + ScalingGroupName: aws.String(rs.Primary.Attributes["name"]), + EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), + }) + + if err != nil { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxScalingGroup, rs.Primary.ID, err) + } + + *KxScalingGroup = *resp + + return nil + } +} + +func testAccKxScalingGroupConfigBase(rName string) string { + return fmt.Sprintf(` + data "aws_caller_identity" "current" {} + data "aws_partition" "current" {} + + output "account_id" { + value = data.aws_caller_identity.current.account_id + } + + resource "aws_kms_key" "test" { + deletion_window_in_days = 7 + } + + resource "aws_finspace_kx_environment" "test" { + name = %[1]q + kms_key_id = aws_kms_key.test.arn + } + + data "aws_iam_policy_document" "key_policy" { + statement { + actions = [ + "kms:Decrypt", + "kms:GenerateDataKey" + ] + + resources = [ + aws_kms_key.test.arn, + ] + + principals { + type = "Service" + identifiers = ["finspace.amazonaws.com"] + } + + condition { + test = "ArnLike" + variable = "aws:SourceArn" + values = ["${aws_finspace_kx_environment.test.arn}/*"] + } + + condition { + test = "StringEquals" + variable = "aws:SourceAccount" + values = [data.aws_caller_identity.current.account_id] + } + } + + statement { + actions = [ + "kms:*", + ] + + resources = [ + "*", + ] + + principals { + type = "AWS" + identifiers = 
["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] + } + } + } + + resource "aws_kms_key_policy" "test" { + key_id = aws_kms_key.test.id + policy = data.aws_iam_policy_document.key_policy.json + } + + resource "aws_vpc" "test" { + cidr_block = "172.31.0.0/16" + enable_dns_hostnames = true + } + + resource "aws_subnet" "test" { + vpc_id = aws_vpc.test.id + cidr_block = "172.31.32.0/20" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + } + + resource "aws_security_group" "test" { + name = %[1]q + vpc_id = aws_vpc.test.id + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + } + + resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id + } + + data "aws_route_tables" "rts" { + vpc_id = aws_vpc.test.id + } + + resource "aws_route" "r" { + route_table_id = tolist(data.aws_route_tables.rts.ids)[0] + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id + } + `, rName) +} + +func testAccKxScalingGroupConfig_basic(rName string) string { + return acctest.ConfigCompose( + testAccKxScalingGroupConfigBase(rName), + fmt.Sprintf(` + resource "aws_finspace_kx_scaling_group" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + host_type = "kx.sg.4xlarge" + } + `, rName)) +} diff --git a/internal/service/finspace/service_package_gen.go b/internal/service/finspace/service_package_gen.go index 42b687b450e..029ecf2b731 100644 --- a/internal/service/finspace/service_package_gen.go +++ b/internal/service/finspace/service_package_gen.go @@ -52,6 +52,14 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka IdentifierAttribute: "arn", }, }, + { + Factory: ResourceKxScalingGroup, + TypeName: 
"aws_finspace_kx_scaling_group", + Name: "Kx Scaling Group", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: "arn", + }, + }, { Factory: ResourceKxUser, TypeName: "aws_finspace_kx_user", @@ -60,6 +68,14 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka IdentifierAttribute: "arn", }, }, + { + Factory: ResourceKxVolume, + TypeName: "aws_finspace_kx_volume", + Name: "Kx Volume", + Tags: &types.ServicePackageResourceTags{ + IdentifierAttribute: "arn", + }, + }, } } diff --git a/website/docs/r/finspace_kx_scaling_group.html.markdown b/website/docs/r/finspace_kx_scaling_group.html.markdown new file mode 100644 index 00000000000..fa5717c5862 --- /dev/null +++ b/website/docs/r/finspace_kx_scaling_group.html.markdown @@ -0,0 +1,81 @@ +--- +subcategory: "FinSpace" +layout: "aws" +page_title: "AWS: aws_finspace_kx_scaling_group" +description: |- + Terraform resource for managing an AWS FinSpace Kx Scaling Group. +--- + +# Resource: aws_finspace_kx_scaling_group + +Terraform resource for managing an AWS FinSpace Kx Scaling Group. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_finspace_kx_scaling_group" "test" { + name = "my-tf-kx-scalinggroup" + environment_id = aws_finspace_kx_environment.example.id + availability_zone_id = "use1-az2" host_type = "kx.sg.4xlarge" +} +``` + +## Argument Reference + +The following arguments are required: + +* `availability_zone_id` - (Required) The availability zone identifiers for the requested regions. +* `environment_id` - (Required) A unique identifier for the kdb environment, where you want to create the scaling group. +* `name` - (Required) Unique name for the scaling group that you want to create. +* `host_type` - (Required) The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed. + +The following arguments are optional: + +* `tags` - (Optional) Key-value mapping of resource tags. 
If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. You can add up to 50 tags to a scaling group. + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - Amazon Resource Name (ARN) identifier of the KX Scaling Group. +* `clusters` - The list of Managed kdb clusters that are currently active in the given scaling group. +* `created_timestamp` - The timestamp at which the scaling group was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. +* `last_modified_timestamp` - Last timestamp at which the scaling group was updated in FinSpace. Value determined as epoch time in seconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000. +* `status` - The status of scaling group. + * CREATING – The scaling group creation is in progress. + * CREATE_FAILED – The scaling group creation has failed. + * ACTIVE – The scaling group is active. + * UPDATING – The scaling group is in the process of being updated. + * UPDATE_FAILED – The update action failed. + * DELETING – The scaling group is in the process of being deleted. + * DELETE_FAILED – The system failed to delete the scaling group. + * DELETED – The scaling group is successfully deleted. +* `status_reason` - The error message when a failed state occurs. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block).
+ +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `45m`) +* `update` - (Default `30m`) +* `delete` - (Default `60m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import an AWS FinSpace Kx scaling group using the `id` (environment ID and scaling group name, comma-delimited). For example: + +```terraform +import { + to = aws_finspace_kx_scaling_group.example + id = "n3ceo7wqxoxcti5tujqwzs,my-tf-kx-scalinggroup" +} +``` + +Using `terraform import`, import an AWS FinSpace Kx Scaling Group using the `id` (environment ID and scaling group name, comma-delimited). For example: + +```console +% terraform import aws_finspace_kx_scaling_group.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-scalinggroup +``` From 623c3ecdeb9e02f45a18223e929ea0dd1099a44d Mon Sep 17 00:00:00 2001 From: Mayank Hirani Date: Fri, 8 Dec 2023 13:16:30 -0800 Subject: [PATCH 061/438] Cluster changes for Hashicorp PR.
--- internal/service/finspace/kx_cluster.go | 219 ++++++++++- internal/service/finspace/kx_cluster_test.go | 345 +++++++++++++++++- .../docs/r/finspace_kx_cluster.html.markdown | 22 +- .../r/finspace_kx_scaling_group.html.markdown | 81 ++++ 4 files changed, 639 insertions(+), 28 deletions(-) create mode 100644 website/docs/r/finspace_kx_scaling_group.html.markdown diff --git a/internal/service/finspace/kx_cluster.go b/internal/service/finspace/kx_cluster.go index 2346f196a56..6a6f26833e2 100644 --- a/internal/service/finspace/kx_cluster.go +++ b/internal/service/finspace/kx_cluster.go @@ -132,7 +132,7 @@ func ResourceKxCluster() *schema.Resource { }, "capacity_configuration": { Type: schema.TypeList, - Required: true, + Optional: true, ForceNew: true, MaxItems: 1, Elem: &schema.Resource{ @@ -225,6 +225,12 @@ func ResourceKxCluster() *schema.Resource { ForceNew: true, ValidateFunc: validation.StringLenBetween(3, 63), }, + "dataview_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, }, }, }, @@ -280,17 +286,23 @@ func ResourceKxCluster() *schema.Resource { Schema: map[string]*schema.Schema{ "type": { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, ValidateFunc: validation.StringInSlice( enum.Slice(types.KxSavedownStorageTypeSds01), true), }, "size": { Type: schema.TypeInt, - Required: true, + Optional: true, ForceNew: true, ValidateFunc: validation.IntBetween(10, 16000), }, + "volume_name": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, }, }, }, @@ -346,6 +358,64 @@ func ResourceKxCluster() *schema.Resource { }, }, }, + "scaling_group_configuration": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "scaling_group_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + 
ValidateFunc: validation.StringLenBetween(3, 63), + }, + "cpu": { + Type: schema.TypeFloat, + Optional: true, + ForceNew: true, + ValidateFunc: validation.FloatAtLeast(0.1), + }, + "node_count": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(1), + }, + "memory_limit": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(6), + }, + "memory_reservation": { + Type: schema.TypeInt, + Required: true, + ForceNew: true, + ValidateFunc: validation.IntAtLeast(6), + }, + }, + }, + }, + "tickerplant_log_configuration": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "tickerplant_log_volumes": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Elem: &schema.Schema{ + Type: schema.TypeString, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + }, + }, + }, + }, }, CustomizeDiff: verify.SetTagsDiff, @@ -375,14 +445,13 @@ func resourceKxClusterCreate(ctx context.Context, d *schema.ResourceData, meta i d.SetId(rID) in := &finspace.CreateKxClusterInput{ - EnvironmentId: aws.String(environmentId), - ClusterName: aws.String(clusterName), - ClusterType: types.KxClusterType(d.Get("type").(string)), - ReleaseLabel: aws.String(d.Get("release_label").(string)), - AzMode: types.KxAzMode(d.Get("az_mode").(string)), - CapacityConfiguration: expandCapacityConfiguration(d.Get("capacity_configuration").([]interface{})), - ClientToken: aws.String(id.UniqueId()), - Tags: getTagsIn(ctx), + EnvironmentId: aws.String(environmentId), + ClusterName: aws.String(clusterName), + ClusterType: types.KxClusterType(d.Get("type").(string)), + ReleaseLabel: aws.String(d.Get("release_label").(string)), + AzMode: types.KxAzMode(d.Get("az_mode").(string)), + ClientToken: aws.String(id.UniqueId()), + Tags: getTagsIn(ctx), } if v, ok := d.GetOk("description"); ok { @@ -401,6 +470,10 @@ func resourceKxClusterCreate(ctx 
context.Context, d *schema.ResourceData, meta i in.AvailabilityZoneId = aws.String(v.(string)) } + if v, ok := d.GetOk("capacity_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + in.CapacityConfiguration = expandCapacityConfiguration(v.([]interface{})) + } + if v, ok := d.GetOk("command_line_arguments"); ok && len(v.(map[string]interface{})) > 0 { in.CommandLineArguments = expandCommandLineArguments(v.(map[string]interface{})) } @@ -429,6 +502,14 @@ func resourceKxClusterCreate(ctx context.Context, d *schema.ResourceData, meta i in.Code = expandCode(v.([]interface{})) } + if v, ok := d.GetOk("scaling_group_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + in.ScalingGroupConfiguration = expandScalingGroupConfiguration(v.([]interface{})) + } + + if v, ok := d.GetOk("tickerplant_log_configuration"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + in.TickerplantLogConfiguration = expandTickerplantLogConfiguration(v.([]interface{})) + } + out, err := conn.CreateKxCluster(ctx, in) if err != nil { return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxCluster, d.Get("name").(string), err) @@ -507,6 +588,14 @@ func resourceKxClusterRead(ctx context.Context, d *schema.ResourceData, meta int return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxCluster, d.Id(), err) } + if err := d.Set("scaling_group_configuration", flattenScalingGroupConfiguration(out.ScalingGroupConfiguration)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxCluster, d.Id(), err)...) + } + + if err := d.Set("tickerplant_log_configuration", flattenTickerplantLogConfiguration(out.TickerplantLogConfiguration)); err != nil { + return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxCluster, d.Id(), err)...) 
+ } + // compose cluster ARN using environment ARN parts, err := flex.ExpandResourceId(d.Id(), kxUserIDPartCount, false) if err != nil { @@ -767,6 +856,38 @@ func expandAutoScalingConfiguration(tfList []interface{}) *types.AutoScalingConf return a } +func expandScalingGroupConfiguration(tfList []interface{}) *types.KxScalingGroupConfiguration { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]interface{}) + + a := &types.KxScalingGroupConfiguration{} + + if v, ok := tfMap["scaling_group_name"].(string); ok && v != "" { + a.ScalingGroupName = aws.String(v) + } + + if v, ok := tfMap["node_count"].(int); ok && v != 0 { + a.NodeCount = aws.Int32(int32(v)) + } + + if v, ok := tfMap["memory_limit"].(int); ok && v != 0 { + a.MemoryLimit = aws.Int32(int32(v)) + } + + if v, ok := tfMap["cpu"].(float64); ok && v != 0 { + a.Cpu = aws.Float64(v) + } + + if v, ok := tfMap["memory_reservation"].(int); ok && v != 0 { + a.MemoryReservation = aws.Int32(int32(v)) + } + + return a +} + func expandSavedownStorageConfiguration(tfList []interface{}) *types.KxSavedownStorageConfiguration { if len(tfList) == 0 || tfList[0] == nil { return nil @@ -784,6 +905,10 @@ func expandSavedownStorageConfiguration(tfList []interface{}) *types.KxSavedownS a.Size = aws.Int32(int32(v)) } + if v, ok := tfMap["volume_name"].(string); ok && v != "" { + a.VolumeName = aws.String(v) + } + return a } @@ -815,6 +940,22 @@ func expandVPCConfiguration(tfList []interface{}) *types.VpcConfiguration { return a } +func expandTickerplantLogConfiguration(tfList []interface{}) *types.TickerplantLogConfiguration { + if len(tfList) == 0 || tfList[0] == nil { + return nil + } + + tfMap := tfList[0].(map[string]interface{}) + + a := &types.TickerplantLogConfiguration{} + + if v, ok := tfMap["tickerplant_log_volumes"].(*schema.Set); ok && v.Len() > 0 { + a.TickerplantLogVolumes = flex.ExpandStringValueSet(v) + } + + return a +} + func expandCacheStorageConfiguration(tfMap 
map[string]interface{}) *types.KxCacheStorageConfiguration { if tfMap == nil { return nil @@ -896,6 +1037,10 @@ func expandDatabase(tfMap map[string]interface{}) *types.KxDatabaseConfiguration a.DatabaseName = aws.String(v) } + if v, ok := tfMap["dataview_name"].(string); ok && v != "" { + a.DataviewName = aws.String(v) + } + if v, ok := tfMap["cache_configurations"]; ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { a.CacheConfigurations = expandCacheConfigurations(v.([]interface{})) } @@ -1059,6 +1204,50 @@ func flattenAutoScalingConfiguration(apiObject *types.AutoScalingConfiguration) return []interface{}{m} } +func flattenScalingGroupConfiguration(apiObject *types.KxScalingGroupConfiguration) []interface{} { + if apiObject == nil { + return nil + } + + m := map[string]interface{}{} + + if v := apiObject.ScalingGroupName; v != nil { + m["scaling_group_name"] = aws.ToString(v) + } + + if v := apiObject.NodeCount; v != nil { + m["node_count"] = aws.ToInt32(v) + } + + if v := apiObject.MemoryLimit; v != nil { + m["memory_limit"] = aws.ToInt32(v) + } + + if v := apiObject.Cpu; v != nil { + m["cpu"] = aws.ToFloat64(v) + } + + if v := apiObject.MemoryReservation; v != nil { + m["memory_reservation"] = aws.ToInt32(v) + } + + return []interface{}{m} +} + +func flattenTickerplantLogConfiguration(apiObject *types.TickerplantLogConfiguration) []interface{} { + if apiObject == nil { + return nil + } + + m := map[string]interface{}{} + + if v := apiObject.TickerplantLogVolumes; v != nil { + m["tickerplant_log_volumes"] = v + } + + return []interface{}{m} +} + func flattenSavedownStorageConfiguration(apiObject *types.KxSavedownStorageConfiguration) []interface{} { if apiObject == nil { return nil @@ -1074,6 +1263,10 @@ func flattenSavedownStorageConfiguration(apiObject *types.KxSavedownStorageConfi m["size"] = v } + if v := apiObject.VolumeName; v != nil { + m["volume_name"] = aws.ToString(v) + } + return []interface{}{m} } @@ -1200,6 +1393,10 @@ func 
flattenDatabase(apiObject *types.KxDatabaseConfiguration) map[string]interf m["database_name"] = aws.ToString(v) } + if v := apiObject.DataviewName; v != nil { + m["dataview_name"] = aws.ToString(v) + } + if v := apiObject.CacheConfigurations; v != nil { m["cache_configurations"] = flattenCacheConfigurations(v) } diff --git a/internal/service/finspace/kx_cluster_test.go b/internal/service/finspace/kx_cluster_test.go index 6fc14e33133..3c5c2738a75 100644 --- a/internal/service/finspace/kx_cluster_test.go +++ b/internal/service/finspace/kx_cluster_test.go @@ -32,7 +32,7 @@ func testAccPreCheckManagedKxLicenseEnabled(t *testing.T) { } } -func TestAccFinSpaceKxCluster_basic(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_basic(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -69,7 +69,7 @@ func TestAccFinSpaceKxCluster_basic(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_disappears(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_disappears(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -101,7 +101,7 @@ func TestAccFinSpaceKxCluster_disappears(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_description(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_description(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -132,7 +132,7 @@ func TestAccFinSpaceKxCluster_description(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_database(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_database(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -164,7 +164,7 @@ func TestAccFinSpaceKxCluster_database(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_cacheConfigurations(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_cacheConfigurations(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -195,7 +195,7 @@ func 
TestAccFinSpaceKxCluster_cacheConfigurations(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_cache250Configurations(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_cache250Configurations(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -233,7 +233,7 @@ func TestAccFinSpaceKxCluster_cache250Configurations(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_cache12Configurations(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_cache12Configurations(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -271,7 +271,7 @@ func TestAccFinSpaceKxCluster_cache12Configurations(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_code(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_code(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -319,7 +319,7 @@ func TestAccFinSpaceKxCluster_code(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_multiAZ(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_multiAZ(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -350,7 +350,7 @@ func TestAccFinSpaceKxCluster_multiAZ(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_rdb(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_rdb(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -381,7 +381,7 @@ func TestAccFinSpaceKxCluster_rdb(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_executionRole(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_executionRole(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -412,7 +412,7 @@ func TestAccFinSpaceKxCluster_executionRole(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_autoScaling(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_autoScaling(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -443,7 +443,7 @@ func 
TestAccFinSpaceKxCluster_autoScaling(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_initializationScript(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_initializationScript(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -484,7 +484,7 @@ func TestAccFinSpaceKxCluster_initializationScript(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_commandLineArgs(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_commandLineArgs(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -527,7 +527,7 @@ func TestAccFinSpaceKxCluster_commandLineArgs(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_tags(t *testing.T) { +func TestAccSKIPFinSpaceKxCluster_tags(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -576,6 +576,134 @@ func TestAccFinSpaceKxCluster_tags(t *testing.T) { }) } +func TestAccSKIPFinSpaceKxCluster_ScalingGroup(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var kxcluster finspace.GetKxClusterOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + testAccPreCheckManagedKxLicenseEnabled(t) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxClusterConfig_ScalingGroup(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxClusterExists(ctx, resourceName, &kxcluster), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxClusterStatusRunning)), + ), + }, + }, + }) +} + 
+func TestAccSKIPFinSpaceKxRDBClusterInScalingGroup_withKxVolume(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var kxcluster finspace.GetKxClusterOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + testAccPreCheckManagedKxLicenseEnabled(t) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxRDBClusterConfigInScalingGroup_withKxVolume(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxClusterExists(ctx, resourceName, &kxcluster), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxClusterStatusRunning)), + ), + }, + }, + }) +} + +func TestAccFinSpaceKxTPClusterInScalingGroup_withKxVolume(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var kxcluster finspace.GetKxClusterOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + testAccPreCheckManagedKxLicenseEnabled(t) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxTPClusterConfigInScalingGroup_withKxVolume(rName), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckKxClusterExists(ctx, resourceName, &kxcluster), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxClusterStatusRunning)), + ), + }, + }, + }) +} + +func TestAccFinSpaceKxClusterInScalingGroup_withKxDataview(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var kxcluster finspace.GetKxClusterOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_cluster.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + testAccPreCheckManagedKxLicenseEnabled(t) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxClusterDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxClusterConfigInScalingGroup_withKxDataview(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxClusterExists(ctx, resourceName, &kxcluster), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxClusterStatusRunning)), + ), + }, + }, + }) +} + func testAccCheckKxClusterDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) @@ -746,6 +874,50 @@ resource "aws_route" "r" { `, rName) } +func testAccKxClusterConfigScalingGroupBase(rName string) string { + return fmt.Sprintf(` + resource "aws_finspace_kx_scaling_group" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + host_type = "kx.sg.4xlarge" + } + `, rName) +} + +func testAccKxClusterConfigKxVolumeBase(rName 
string) string { + return fmt.Sprintf(` + resource "aws_finspace_kx_volume" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type= "SSD_1000" + size= 1200 + } + } + `, rName) +} + +func testAccKxClusterConfigKxDataviewBase(rName string) string { + return fmt.Sprintf(` +resource "aws_finspace_kx_database" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id +} + +resource "aws_finspace_kx_dataview" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + database_name = aws_finspace_kx_database.test.name + auto_update = true + az_mode = "SINGLE" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] +} +`, rName) +} func testAccKxClusterConfig_basic(rName string) string { return acctest.ConfigCompose( testAccKxClusterConfigBase(rName), @@ -772,6 +944,149 @@ resource "aws_finspace_kx_cluster" "test" { `, rName)) } +func testAccKxClusterConfig_ScalingGroup(rName string) string { + return acctest.ConfigCompose( + testAccKxClusterConfigBase(rName), + testAccKxClusterConfigScalingGroupBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_cluster" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + type = "HDB" + release_label = "1.0" + az_mode = "SINGLE" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + vpc_configuration { + vpc_id = aws_vpc.test.id + security_group_ids = [aws_security_group.test.id] + subnet_ids = [aws_subnet.test.id] + ip_address_type = "IP_V4" + } + scaling_group_configuration { + scaling_group_name = aws_finspace_kx_scaling_group.test.name + memory_limit = 200 + memory_reservation = 100 + node_count = 1 + cpu = 0.5 + } +} +`, rName)) +} + +func testAccKxRDBClusterConfigInScalingGroup_withKxVolume(rName string) string { + return 
acctest.ConfigCompose( + testAccKxClusterConfigBase(rName), + testAccKxClusterConfigKxVolumeBase(rName), + testAccKxClusterConfigScalingGroupBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_database" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + } + +resource "aws_finspace_kx_cluster" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + type = "RDB" + release_label = "1.0" + az_mode = "SINGLE" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + vpc_configuration { + vpc_id = aws_vpc.test.id + security_group_ids = [aws_security_group.test.id] + subnet_ids = [aws_subnet.test.id] + ip_address_type = "IP_V4" + } + scaling_group_configuration { + scaling_group_name = aws_finspace_kx_scaling_group.test.name + memory_limit = 200 + memory_reservation = 100 + node_count = 1 + cpu = 0.5 + } + database { + database_name = aws_finspace_kx_database.test.name + } + savedown_storage_configuration { + volume_name = aws_finspace_kx_volume.test.name + } +} +`, rName)) +} + +func testAccKxTPClusterConfigInScalingGroup_withKxVolume(rName string) string { + return acctest.ConfigCompose( + testAccKxClusterConfigBase(rName), + testAccKxClusterConfigKxVolumeBase(rName), + testAccKxClusterConfigScalingGroupBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_cluster" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + type = "TICKERPLANT" + release_label = "1.0" + az_mode = "SINGLE" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + vpc_configuration { + vpc_id = aws_vpc.test.id + security_group_ids = [aws_security_group.test.id] + subnet_ids = [aws_subnet.test.id] + ip_address_type = "IP_V4" + } + scaling_group_configuration { + scaling_group_name = aws_finspace_kx_scaling_group.test.name + memory_limit = 200 + memory_reservation = 100 + node_count = 1 + cpu = 0.5 + } + tickerplant_log_configuration { + tickerplant_log_volumes = 
[aws_finspace_kx_volume.test.name] + } +} +`, rName)) +} + +func testAccKxClusterConfigInScalingGroup_withKxDataview(rName string) string { + return acctest.ConfigCompose( + testAccKxClusterConfigBase(rName), + testAccKxClusterConfigScalingGroupBase(rName), + testAccKxClusterConfigKxDataviewBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_cluster" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + type = "HDB" + release_label = "1.0" + az_mode = "SINGLE" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + vpc_configuration { + vpc_id = aws_vpc.test.id + security_group_ids = [aws_security_group.test.id] + subnet_ids = [aws_subnet.test.id] + ip_address_type = "IP_V4" + } + + scaling_group_configuration { + scaling_group_name = aws_finspace_kx_scaling_group.test.name + memory_limit = 200 + memory_reservation = 100 + node_count = 1 + cpu = 0.5 + } + + database { + database_name = aws_finspace_kx_database.test.name + dataview_name = aws_finspace_kx_dataview.test.name + } + + lifecycle { + ignore_changes = [database] + } +} +`, rName)) +} + func testAccKxClusterConfig_description(rName, description string) string { return acctest.ConfigCompose( testAccKxClusterConfigBase(rName), diff --git a/website/docs/r/finspace_kx_cluster.html.markdown b/website/docs/r/finspace_kx_cluster.html.markdown index 52ed4105d4a..f7d59e6d71a 100644 --- a/website/docs/r/finspace_kx_cluster.html.markdown +++ b/website/docs/r/finspace_kx_cluster.html.markdown @@ -92,6 +92,8 @@ The following arguments are optional: * `execution_role` - (Optional) An IAM role that defines a set of permissions associated with a cluster. These permissions are assumed when a cluster attempts to access another cluster. * `initialization_script` - (Optional) Path to Q program that will be run at launch of a cluster. This is a relative path within .zip file that contains the custom code, which will be loaded on the cluster. 
It must include the file name itself. For example, somedir/init.q.
 * `savedown_storage_configuration` - (Optional) Size and type of the temporary storage that is used to hold data during the savedown process. This parameter is required when you choose `type` as RDB. All the data written to this storage space is lost when the cluster node is restarted. See [savedown_storage_configuration](#savedown_storage_configuration).
+* `scaling_group_configuration` - (Optional) The structure that stores the configuration details of a scaling group.
+* `tickerplant_log_configuration` - (Optional) A configuration to store Tickerplant logs. It consists of a list of volumes that will be mounted to your cluster. For the cluster type Tickerplant, the location of the TP volume on the cluster will be available by using the global variable `.aws.tp_log_path`.
 * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level.

 ### auto_scaling_configuration
@@ -149,6 +151,7 @@ The database block supports the following arguments:

 * `database_name` - (Required) Name of the KX database.
 * `cache_configurations` - (Optional) Configuration details for the disk cache to increase performance reading from a KX database mounted to the cluster. See [cache_configurations](#cache_configurations).
 * `changeset_id` - (Optional) A unique identifier of the changeset that is associated with the cluster.
+* `dataview_name` - (Optional) The name of the dataview to be used for caching historical data on disk. You cannot update to a different dataview name once a cluster is created. Use `lifecycle` [`ignore_changes`](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) for database to prevent any undesirable behaviors. 
#### cache_configurations
@@ -161,9 +164,10 @@ The cache_configuration block supports the following arguments:

 The savedown_storage_configuration block supports the following arguments:

-* `type` - (Required) Type of writeable storage space for temporarily storing your savedown data. The valid values are:
+* `type` - (Optional) Type of writeable storage space for temporarily storing your savedown data. The valid values are:
 * SDS01 - This type represents 3000 IOPS and io2 ebs volume type.
-* `size` - (Required) Size of temporary storage in gigabytes. Must be between 10 and 16000.
+* `size` - (Optional) Size of temporary storage in gigabytes. Must be between 10 and 16000.
+* `volume_name` - (Optional) The name of the kdb volume that you want to use as writeable save-down storage for clusters.

 ### vpc_configuration

@@ -174,6 +178,20 @@ The vpc_configuration block supports the following arguments:
 * `subnet_ids `- (Required) Identifier of the subnet that the Privatelink VPC endpoint uses to connect to the cluster.
 * `ip_address_type` - (Required) IP address type for cluster network configuration parameters. The following type is available: IP_V4 - IP address version 4.

+### scaling_group_configuration
+
+* `scaling_group_name` - (Required) A unique identifier for the kdb scaling group.
+* `memory_reservation` - (Required) A reservation of the minimum amount of memory that should be available on the scaling group for a kdb cluster to be successfully placed in a scaling group.
+* `node_count` - (Required) The number of kdb cluster nodes.
+* `cpu` - (Optional) The number of vCPUs that you want to reserve for each node of this kdb cluster on the scaling group host.
+* `memory_limit` - (Optional) An optional hard limit on the amount of memory a kdb cluster can use.
+
+### tickerplant_log_configuration
+
+The tickerplant_log_configuration block supports the following arguments:
+
+* `tickerplant_log_volumes` - (Required) The names of the volumes for tickerplant logs. 
+ ## Attribute Reference This resource exports the following attributes in addition to the arguments above: diff --git a/website/docs/r/finspace_kx_scaling_group.html.markdown b/website/docs/r/finspace_kx_scaling_group.html.markdown new file mode 100644 index 00000000000..fa5717c5862 --- /dev/null +++ b/website/docs/r/finspace_kx_scaling_group.html.markdown @@ -0,0 +1,81 @@ +--- +subcategory: "FinSpace" +layout: "aws" +page_title: "AWS: aws_finspace_kx_scaling_group" +description: |- + Terraform resource for managing an AWS FinSpace Kx Scaling Group. +--- + +# Resource: aws_finspace_kx_scaling_group + +Terraform resource for managing an AWS FinSpace Kx Scaling Group. + +## Example Usage + +### Basic Usage + +```terraform +resource "aws_finspace_kx_scaling_group" "test" { + name = "my-tf-kx-scalinggroup" + environment_id = aws_finspace_kx_environment.example.id + availability_zone_id = "use1-az2" host_type = "kx.sg.4xlarge" +} +``` + +## Argument Reference + +The following arguments are required: + +* `availability_zone_id` - (Required) The availability zone identifiers for the requested regions. +* `environment_id` - (Required) A unique identifier for the kdb environment, where you want to create the scaling group. +* `name` - (Required) Unique name for the scaling group that you want to create. +* `host_type` - (Required) The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed. + +The following arguments are optional: + +* `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. You can add up to 50 tags to a scaling group. 
+ +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - Amazon Resource Name (ARN) identifier of the KX Scaling Group. +* `clusters` - The list of Managed kdb clusters that are currently active in the given scaling group. +* `created_timestamp` - The timestamp at which the scaling group was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. +* `last_modified_timestamp` - Last timestamp at which the scaling group was updated in FinSpace. Value determined as epoch time in seconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000. +*`status` - The status of scaling group. + * CREATING – The scaling group creation is in progress. + * CREATE_FAILED – The scaling group creation has failed. + * ACTIVE – The scaling group is active. + * UPDATING – The scaling group is in the process of being updated. + * UPDATE_FAILED – The update action failed. + * DELETING – The scaling group is in the process of being deleted. + * DELETE_FAILED – The system failed to delete the scaling group. + * DELETED – The scaling group is successfully deleted. +* `status_reason` - The error message when a failed state occurs. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). 
+ +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +* `create` - (Default `45m`) +* `update` - (Default `30m`) +* `delete` - (Default `60m`) + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import an AWS FinSpace Kx scaling group using the `id` (environment ID and scaling group name, comma-delimited). For example: + +```terraform +import { + to = aws_finspace_kx_scaling_group.example + id = "n3ceo7wqxoxcti5tujqwzs,my-tf-kx-scalinggroup" +} +``` + +Using `terraform import`, import an AWS FinSpace Kx Scaling Group using the `id` (environment ID and scaling group name, comma-delimited). For example: + +```console +% terraform import aws_finspace_kx_scaling_group.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-scalinggroup +``` From dd1d92cc4239aa6081c5c00ec44a581daccc514d Mon Sep 17 00:00:00 2001 From: Mayank Hirani Date: Fri, 8 Dec 2023 13:42:04 -0800 Subject: [PATCH 062/438] remove kx volume from service package. 
--- internal/service/finspace/service_package_gen.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/internal/service/finspace/service_package_gen.go b/internal/service/finspace/service_package_gen.go index 029ecf2b731..46468a889b9 100644 --- a/internal/service/finspace/service_package_gen.go +++ b/internal/service/finspace/service_package_gen.go @@ -68,14 +68,6 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka IdentifierAttribute: "arn", }, }, - { - Factory: ResourceKxVolume, - TypeName: "aws_finspace_kx_volume", - Name: "Kx Volume", - Tags: &types.ServicePackageResourceTags{ - IdentifierAttribute: "arn", - }, - }, } } From 33301ff8451efe887d20fcf93a81edca530d4d8d Mon Sep 17 00:00:00 2001 From: David Hwang Date: Fri, 8 Dec 2023 16:45:55 -0500 Subject: [PATCH 063/438] Remove skips from cluster test --- internal/service/finspace/kx_cluster_test.go | 30 +++---- .../r/finspace_kx_scaling_group.html.markdown | 81 ------------------- 2 files changed, 15 insertions(+), 96 deletions(-) delete mode 100644 website/docs/r/finspace_kx_scaling_group.html.markdown diff --git a/internal/service/finspace/kx_cluster_test.go b/internal/service/finspace/kx_cluster_test.go index 3c5c2738a75..16c5b53d1d6 100644 --- a/internal/service/finspace/kx_cluster_test.go +++ b/internal/service/finspace/kx_cluster_test.go @@ -101,7 +101,7 @@ func TestAccSKIPFinSpaceKxCluster_disappears(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_description(t *testing.T) { +func TestAccFinSpaceKxCluster_description(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -132,7 +132,7 @@ func TestAccSKIPFinSpaceKxCluster_description(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_database(t *testing.T) { +func TestAccFinSpaceKxCluster_database(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -164,7 +164,7 @@ func TestAccSKIPFinSpaceKxCluster_database(t *testing.T) { }) } 
-func TestAccSKIPFinSpaceKxCluster_cacheConfigurations(t *testing.T) { +func TestAccFinSpaceKxCluster_cacheConfigurations(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -195,7 +195,7 @@ func TestAccSKIPFinSpaceKxCluster_cacheConfigurations(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_cache250Configurations(t *testing.T) { +func TestAccFinSpaceKxCluster_cache250Configurations(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -233,7 +233,7 @@ func TestAccSKIPFinSpaceKxCluster_cache250Configurations(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_cache12Configurations(t *testing.T) { +func TestAccFinSpaceKxCluster_cache12Configurations(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -271,7 +271,7 @@ func TestAccSKIPFinSpaceKxCluster_cache12Configurations(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_code(t *testing.T) { +func TestAccFinSpaceKxCluster_code(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -319,7 +319,7 @@ func TestAccSKIPFinSpaceKxCluster_code(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_multiAZ(t *testing.T) { +func TestAccFinSpaceKxCluster_multiAZ(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -350,7 +350,7 @@ func TestAccSKIPFinSpaceKxCluster_multiAZ(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_rdb(t *testing.T) { +func TestAccFinSpaceKxCluster_rdb(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -381,7 +381,7 @@ func TestAccSKIPFinSpaceKxCluster_rdb(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_executionRole(t *testing.T) { +func TestAccFinSpaceKxCluster_executionRole(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -412,7 +412,7 @@ func TestAccSKIPFinSpaceKxCluster_executionRole(t 
*testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_autoScaling(t *testing.T) { +func TestAccFinSpaceKxCluster_autoScaling(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -443,7 +443,7 @@ func TestAccSKIPFinSpaceKxCluster_autoScaling(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_initializationScript(t *testing.T) { +func TestAccFinSpaceKxCluster_initializationScript(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -484,7 +484,7 @@ func TestAccSKIPFinSpaceKxCluster_initializationScript(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_commandLineArgs(t *testing.T) { +func TestAccFinSpaceKxCluster_commandLineArgs(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -527,7 +527,7 @@ func TestAccSKIPFinSpaceKxCluster_commandLineArgs(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_tags(t *testing.T) { +func TestAccFinSpaceKxCluster_tags(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -576,7 +576,7 @@ func TestAccSKIPFinSpaceKxCluster_tags(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_ScalingGroup(t *testing.T) { +func TestAccFinSpaceKxCluster_ScalingGroup(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -608,7 +608,7 @@ func TestAccSKIPFinSpaceKxCluster_ScalingGroup(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxRDBClusterInScalingGroup_withKxVolume(t *testing.T) { +func TestAccFinSpaceKxRDBClusterInScalingGroup_withKxVolume(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } diff --git a/website/docs/r/finspace_kx_scaling_group.html.markdown b/website/docs/r/finspace_kx_scaling_group.html.markdown deleted file mode 100644 index fa5717c5862..00000000000 --- a/website/docs/r/finspace_kx_scaling_group.html.markdown +++ /dev/null @@ -1,81 +0,0 @@ ---- -subcategory: "FinSpace" -layout: 
"aws" -page_title: "AWS: aws_finspace_kx_scaling_group" -description: |- - Terraform resource for managing an AWS FinSpace Kx Scaling Group. ---- - -# Resource: aws_finspace_kx_scaling_group - -Terraform resource for managing an AWS FinSpace Kx Scaling Group. - -## Example Usage - -### Basic Usage - -```terraform -resource "aws_finspace_kx_scaling_group" "test" { - name = "my-tf-kx-scalinggroup" - environment_id = aws_finspace_kx_environment.example.id - availability_zone_id = "use1-az2" host_type = "kx.sg.4xlarge" -} -``` - -## Argument Reference - -The following arguments are required: - -* `availability_zone_id` - (Required) The availability zone identifiers for the requested regions. -* `environment_id` - (Required) A unique identifier for the kdb environment, where you want to create the scaling group. -* `name` - (Required) Unique name for the scaling group that you want to create. -* `host_type` - (Required) The memory and CPU capabilities of the scaling group host on which FinSpace Managed kdb clusters will be placed. - -The following arguments are optional: - -* `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. You can add up to 50 tags to a scaling group. - -## Attribute Reference - -This resource exports the following attributes in addition to the arguments above: - -* `arn` - Amazon Resource Name (ARN) identifier of the KX Scaling Group. -* `clusters` - The list of Managed kdb clusters that are currently active in the given scaling group. -* `created_timestamp` - The timestamp at which the scaling group was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. 
-* `last_modified_timestamp` - Last timestamp at which the scaling group was updated in FinSpace. Value determined as epoch time in seconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000. -*`status` - The status of scaling group. - * CREATING – The scaling group creation is in progress. - * CREATE_FAILED – The scaling group creation has failed. - * ACTIVE – The scaling group is active. - * UPDATING – The scaling group is in the process of being updated. - * UPDATE_FAILED – The update action failed. - * DELETING – The scaling group is in the process of being deleted. - * DELETE_FAILED – The system failed to delete the scaling group. - * DELETED – The scaling group is successfully deleted. -* `status_reason` - The error message when a failed state occurs. -* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). - -## Timeouts - -[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): - -* `create` - (Default `45m`) -* `update` - (Default `30m`) -* `delete` - (Default `60m`) - -## Import - -In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import an AWS FinSpace Kx scaling group using the `id` (environment ID and scaling group name, comma-delimited). For example: - -```terraform -import { - to = aws_finspace_kx_scaling_group.example - id = "n3ceo7wqxoxcti5tujqwzs,my-tf-kx-scalinggroup" -} -``` - -Using `terraform import`, import an AWS FinSpace Kx Scaling Group using the `id` (environment ID and scaling group name, comma-delimited). 
For example: - -```console -% terraform import aws_finspace_kx_scaling_group.example n3ceo7wqxoxcti5tujqwzs,my-tf-kx-scalinggroup -``` From 16cebf96bf39a767b75d58ea7f03177aa610b282 Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Sun, 10 Dec 2023 03:56:11 +0100 Subject: [PATCH 064/438] feat: added resource, tests and docs --- .../service/ssoadmin/service_package_gen.go | 5 + .../service/ssoadmin/trusted_token_issuer.go | 399 ++++++++++++++++++ .../ssoadmin/trusted_token_issuer_test.go | 293 +++++++++++++ ...soadmin_trusted_token_issuer.html.markdown | 84 ++++ 4 files changed, 781 insertions(+) create mode 100644 internal/service/ssoadmin/trusted_token_issuer.go create mode 100644 internal/service/ssoadmin/trusted_token_issuer_test.go create mode 100644 website/docs/r/ssoadmin_trusted_token_issuer.html.markdown diff --git a/internal/service/ssoadmin/service_package_gen.go b/internal/service/ssoadmin/service_package_gen.go index 4c91e986ad3..cea26e4b544 100644 --- a/internal/service/ssoadmin/service_package_gen.go +++ b/internal/service/ssoadmin/service_package_gen.go @@ -96,6 +96,11 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka Factory: ResourcePermissionsBoundaryAttachment, TypeName: "aws_ssoadmin_permissions_boundary_attachment", }, + { + Factory: ResourceTrustedTokenIssuer, + TypeName: "aws_ssoadmin_trusted_token_issuer", + Tags: &types.ServicePackageResourceTags{}, + }, } } diff --git a/internal/service/ssoadmin/trusted_token_issuer.go b/internal/service/ssoadmin/trusted_token_issuer.go new file mode 100644 index 00000000000..6f7c88c1b7f --- /dev/null +++ b/internal/service/ssoadmin/trusted_token_issuer.go @@ -0,0 +1,399 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package ssoadmin + +import ( + "context" + "fmt" + "log" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/ssoadmin" + "github.com/aws/aws-sdk-go-v2/service/ssoadmin/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/verify" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @SDKResource("aws_ssoadmin_trusted_token_issuer") +// @Tags +func ResourceTrustedTokenIssuer() *schema.Resource { + return &schema.Resource{ + CreateWithoutTimeout: resourceTrustedTokenIssuerCreate, + ReadWithoutTimeout: resourceTrustedTokenIssuerRead, + UpdateWithoutTimeout: resourceTrustedTokenIssuerUpdate, + DeleteWithoutTimeout: resourceTrustedTokenIssuerDelete, + + Importer: &schema.ResourceImporter{ + StateContext: schema.ImportStatePassthroughContext, + }, + Schema: map[string]*schema.Schema{ + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "client_token": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, + "instance_arn": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: verify.ValidARN, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "trusted_token_issuer_configuration": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "oidc_jwt_configuration": { + Type: 
schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "claim_attribute_path": { + Type: schema.TypeString, + Required: true, + }, + "identity_store_attribute_path": { + Type: schema.TypeString, + Required: true, + }, + "issuer_url": { + Type: schema.TypeString, + Required: true, + ForceNew: true, // Not part of OidcJwtUpdateConfiguration struct, have to recreate at change + }, + "jwks_retrieval_option": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.JwksRetrievalOption](), + }, + }, + }, + }, + }, + }, + }, + "trusted_token_issuer_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.TrustedTokenIssuerType](), + }, + names.AttrTags: tftags.TagsSchema(), + names.AttrTagsAll: tftags.TagsSchemaComputed(), + }, + + CustomizeDiff: verify.SetTagsDiff, + } +} + +func resourceTrustedTokenIssuerCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).SSOAdminClient(ctx) + + input := &ssoadmin.CreateTrustedTokenIssuerInput{ + InstanceArn: aws.String(d.Get("instance_arn").(string)), + Name: aws.String(d.Get("name").(string)), + TrustedTokenIssuerConfiguration: expandTrustedTokenIssuerConfiguration(d.Get("trusted_token_issuer_configuration").([]interface{})), + TrustedTokenIssuerType: types.TrustedTokenIssuerType(d.Get("trusted_token_issuer_type").(string)), + Tags: getTagsIn(ctx), + } + + if v, ok := d.GetOk("client_token"); ok { + input.ClientToken = aws.String(v.(string)) + } + + output, err := conn.CreateTrustedTokenIssuer(ctx, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "creating SSO Trusted Token Issuer (%s): %s", d.Get("name").(string), err) + } + + d.SetId(aws.ToString(output.TrustedTokenIssuerArn)) + + return append(diags, resourceTrustedTokenIssuerRead(ctx, d, meta)...) 
+} + +func resourceTrustedTokenIssuerRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).SSOAdminClient(ctx) + + output, err := FindTrustedTokenIssuerByARN(ctx, conn, d.Id()) + + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] SSO Trusted Token Issuer (%s) not found, removing from state", d.Id()) + d.SetId("") + return diags + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading SSO Trusted Token Issuer (%s): %s", d.Id(), err) + } + + instanceARN, _ := TrustedTokenIssuerParseInstanceID(d.Id()) + + d.Set("name", output.Name) + d.Set("arn", output.TrustedTokenIssuerArn) + d.Set("instance_arn", instanceARN) + d.Set("trusted_token_issuer_configuration", flattenTrustedTokenIssuerConfiguration(output.TrustedTokenIssuerConfiguration)) + d.Set("trusted_token_issuer_type", output.TrustedTokenIssuerType) + + // listTags requires both trusted token issuer and instance ARN, so must be called + // explicitly rather than with transparent tagging. 
+ tags, err := listTags(ctx, conn, d.Id(), instanceARN) + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading SSO Trusted Token Issuer (%s): %s", d.Id(), err) + } + + setTagsOut(ctx, Tags(tags)) + + return diags +} + +func resourceTrustedTokenIssuerUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).SSOAdminClient(ctx) + + if d.HasChangesExcept("tags", "tags_all") { + input := &ssoadmin.UpdateTrustedTokenIssuerInput{ + TrustedTokenIssuerArn: aws.String(d.Id()), + } + + if d.HasChange("name") { + input.Name = aws.String(d.Get("name").(string)) + } + + if d.HasChange("trusted_token_issuer_configuration") { + input.TrustedTokenIssuerConfiguration = expandTrustedTokenIssuerUpdateConfiguration(d.Get("trusted_token_issuer_configuration").([]interface{})) + } + + _, err := conn.UpdateTrustedTokenIssuer(ctx, input) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "updating SSO Trusted Token Issuer (%s): %s", d.Id(), err) + } + } + + // listTags requires both trusted token issuer and instance ARN, so must be called + // explicitly rather than with transparent tagging. + if d.HasChange("tags_all") { + oldTagsAll, newTagsAll := d.GetChange("tags_all") + if err := updateTags(ctx, conn, d.Id(), d.Get("instance_arn").(string), oldTagsAll, newTagsAll); err != nil { + return sdkdiag.AppendErrorf(diags, "updating SSO Trusted Token Issuer (%s): %s", d.Id(), err) + } + } + + return append(diags, resourceTrustedTokenIssuerRead(ctx, d, meta)...) 
+} + +func resourceTrustedTokenIssuerDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).SSOAdminClient(ctx) + + input := &ssoadmin.DeleteTrustedTokenIssuerInput{ + TrustedTokenIssuerArn: aws.String(d.Id()), + } + + log.Printf("[INFO] Deleting SSO Trusted Token Issuer: %s", d.Id()) + _, err := conn.DeleteTrustedTokenIssuer(ctx, input) + + if errs.IsA[*types.ResourceNotFoundException](err) { + return diags + } + + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting SSO Trusted Token Issuer (%s): %s", d.Id(), err) + } + + return diags +} + +func FindTrustedTokenIssuerByARN(ctx context.Context, conn *ssoadmin.Client, trustedTokenIssuerARN string) (*ssoadmin.DescribeTrustedTokenIssuerOutput, error) { + input := &ssoadmin.DescribeTrustedTokenIssuerInput{ + TrustedTokenIssuerArn: aws.String(trustedTokenIssuerARN), + } + + output, err := conn.DescribeTrustedTokenIssuer(ctx, input) + + if errs.IsA[*types.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +// Instance ID is not returned by DescribeTrustedTokenIssuer but is needed for schema consistency when importing and tagging. 
+// Instance ID can be extracted from the Trusted Token Issuer ARN +func TrustedTokenIssuerParseInstanceID(id string) (string, error) { + parts := strings.Split(id, "/") + + if len(parts) == 3 && parts[0] != "" && parts[1] != "" && parts[2] != "" { + return fmt.Sprintf("arn:aws:sso:::instance/%s", parts[1]), nil + } + + return "", fmt.Errorf("unable to determine Instance ID from Trusted Token Issuer ARN: %s", id) +} + +func expandTrustedTokenIssuerConfiguration(tfMap []interface{}) types.TrustedTokenIssuerConfiguration { + if len(tfMap) == 0 { + return nil + } + + tfList, ok := tfMap[0].(map[string]interface{}) + if !ok { + return nil + } + + if v, ok := tfList["oidc_jwt_configuration"]; ok { + return &types.TrustedTokenIssuerConfigurationMemberOidcJwtConfiguration{ + Value: expandOidcJwtConfiguration(v.([]interface{})), + } + } + + return nil +} + +func expandOidcJwtConfiguration(tfMap []interface{}) types.OidcJwtConfiguration { + apiObject := types.OidcJwtConfiguration{} + + if len(tfMap) == 0 { + return apiObject + } + + tfList, ok := tfMap[0].(map[string]interface{}) + if !ok { + return apiObject + } + + if v, ok := tfList["claim_attribute_path"]; ok { + apiObject.ClaimAttributePath = aws.String(v.(string)) + } + + if v, ok := tfList["identity_store_attribute_path"]; ok { + apiObject.IdentityStoreAttributePath = aws.String(v.(string)) + } + + if v, ok := tfList["issuer_url"]; ok { + apiObject.IssuerUrl = aws.String(v.(string)) + } + + if v, ok := tfList["jwks_retrieval_option"]; ok { + apiObject.JwksRetrievalOption = types.JwksRetrievalOption(v.(string)) + } + + return apiObject +} + +func expandTrustedTokenIssuerUpdateConfiguration(tfMap []interface{}) types.TrustedTokenIssuerUpdateConfiguration { + if len(tfMap) == 0 { + return nil + } + + tfList, ok := tfMap[0].(map[string]interface{}) + if !ok { + return nil + } + + if v, ok := tfList["oidc_jwt_configuration"]; ok { + return &types.TrustedTokenIssuerUpdateConfigurationMemberOidcJwtConfiguration{ + Value: 
expandOidcJwtUpdateConfiguration(v.([]interface{})), + } + } + + return nil +} + +func expandOidcJwtUpdateConfiguration(tfMap []interface{}) types.OidcJwtUpdateConfiguration { + apiObject := types.OidcJwtUpdateConfiguration{} + + if len(tfMap) == 0 { + return apiObject + } + + tfList, ok := tfMap[0].(map[string]interface{}) + if !ok { + return apiObject + } + + if v, ok := tfList["claim_attribute_path"]; ok { + apiObject.ClaimAttributePath = aws.String(v.(string)) + } + + if v, ok := tfList["identity_store_attribute_path"]; ok { + apiObject.IdentityStoreAttributePath = aws.String(v.(string)) + } + + if v, ok := tfList["jwks_retrieval_option"]; ok { + apiObject.JwksRetrievalOption = types.JwksRetrievalOption(v.(string)) + } + + return apiObject +} + +func flattenTrustedTokenIssuerConfiguration(apiObject types.TrustedTokenIssuerConfiguration) []interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + switch v := apiObject.(type) { + case *types.TrustedTokenIssuerConfigurationMemberOidcJwtConfiguration: + tfMap["oidc_jwt_configuration"] = flattenOidcJwtConfiguration(v.Value) + default: + log.Println("union is nil or unknown type") + } + + return []interface{}{tfMap} +} + +func flattenOidcJwtConfiguration(apiObject types.OidcJwtConfiguration) []interface{} { + tfMap := map[string]interface{}{} + + if v := apiObject.ClaimAttributePath; v != nil { + tfMap["claim_attribute_path"] = aws.ToString(v) + } + + if v := apiObject.IdentityStoreAttributePath; v != nil { + tfMap["identity_store_attribute_path"] = aws.ToString(v) + } + + if v := apiObject.IssuerUrl; v != nil { + tfMap["issuer_url"] = aws.ToString(v) + } + + tfMap["jwks_retrieval_option"] = string(apiObject.JwksRetrievalOption) + + return []interface{}{tfMap} +} diff --git a/internal/service/ssoadmin/trusted_token_issuer_test.go b/internal/service/ssoadmin/trusted_token_issuer_test.go new file mode 100644 index 00000000000..cc666fa438c --- /dev/null +++ 
b/internal/service/ssoadmin/trusted_token_issuer_test.go @@ -0,0 +1,293 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package ssoadmin_test + +import ( + "context" + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/ssoadmin/types" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/service/ssoadmin" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccSSOAdminTrustedTokenIssuer_basic(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ssoadmin_trusted_token_issuer.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckSSOAdminInstances(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSOAdminEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTrustedTokenIssuerDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTrustedTokenIssuerConfigBase_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckTrustedTokenIssuerExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "trusted_token_issuer_type", string(types.TrustedTokenIssuerTypeOidcJwt)), + resource.TestCheckResourceAttr(resourceName, "trusted_token_issuer_configuration.0.oidc_jwt_configuration.0.claim_attribute_path", "email"), + resource.TestCheckResourceAttr(resourceName, 
"trusted_token_issuer_configuration.0.oidc_jwt_configuration.0.identity_store_attribute_path", "emails.value"), + resource.TestCheckResourceAttr(resourceName, "trusted_token_issuer_configuration.0.oidc_jwt_configuration.0.issuer_url", "https://example.com"), + resource.TestCheckResourceAttr(resourceName, "trusted_token_issuer_configuration.0.oidc_jwt_configuration.0.jwks_retrieval_option", string(types.JwksRetrievalOptionOpenIdDiscovery)), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccSSOAdminTrustedTokenIssuer_update(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rNameUpdated := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ssoadmin_trusted_token_issuer.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckSSOAdminInstances(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSOAdminEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTrustedTokenIssuerDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTrustedTokenIssuerConfigBase_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckTrustedTokenIssuerExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "trusted_token_issuer_configuration.0.oidc_jwt_configuration.0.claim_attribute_path", "email"), + resource.TestCheckResourceAttr(resourceName, "trusted_token_issuer_configuration.0.oidc_jwt_configuration.0.identity_store_attribute_path", "emails.value"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccTrustedTokenIssuerConfigBase_basicUpdated(rNameUpdated, "name", "userName"), + Check: resource.ComposeTestCheckFunc( + 
testAccCheckTrustedTokenIssuerExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "name", rNameUpdated), + resource.TestCheckResourceAttr(resourceName, "trusted_token_issuer_configuration.0.oidc_jwt_configuration.0.claim_attribute_path", "name"), + resource.TestCheckResourceAttr(resourceName, "trusted_token_issuer_configuration.0.oidc_jwt_configuration.0.identity_store_attribute_path", "userName"), + ), + }, + }, + }) +} + +func TestAccSSOAdminTrustedTokenIssuer_disappears(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ssoadmin_trusted_token_issuer.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckSSOAdminInstances(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSOAdminEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTrustedTokenIssuerDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTrustedTokenIssuerConfigBase_basic(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckTrustedTokenIssuerExists(ctx, resourceName), + acctest.CheckResourceDisappears(ctx, acctest.Provider, ssoadmin.ResourceTrustedTokenIssuer(), resourceName), + ), + ExpectNonEmptyPlan: true, + }, + }, + }) +} + +func TestAccSSOAdminTrustedTokenIssuer_tags(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_ssoadmin_trusted_token_issuer.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); acctest.PreCheckSSOAdminInstances(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.SSOAdminEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTrustedTokenIssuerDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTrustedTokenIssuerConfigBase_tags(rName, "key1", "value1"), + 
Check: resource.ComposeTestCheckFunc( + testAccCheckTrustedTokenIssuerExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccTrustedTokenIssuerConfigBase_tags2(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckTrustedTokenIssuerExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccTrustedTokenIssuerConfigBase_tags(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckTrustedTokenIssuerExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + +func testAccCheckTrustedTokenIssuerExists(ctx context.Context, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).SSOAdminClient(ctx) + + _, err := ssoadmin.FindTrustedTokenIssuerByARN(ctx, conn, rs.Primary.ID) + + return err + } +} + +func testAccCheckTrustedTokenIssuerDestroy(ctx context.Context) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := acctest.Provider.Meta().(*conns.AWSClient).SSOAdminClient(ctx) + + for _, rs := range s.RootModule().Resources { + if rs.Type != "aws_ssoadmin_trusted_token_issuer" { + continue + } + + _, err := ssoadmin.FindTrustedTokenIssuerByARN(ctx, conn, rs.Primary.ID) + + if tfresource.NotFound(err) { + continue + } + + if err != nil { + return err + } + + 
return fmt.Errorf("SSO Admin Trusted Token Issuer %s still exists", rs.Primary.ID) + } + + return nil + } +} + +func testAccTrustedTokenIssuerConfigBase_basic(rName string) string { + return fmt.Sprintf(` +data "aws_ssoadmin_instances" "test" {} + +resource "aws_ssoadmin_trusted_token_issuer" "test" { + name = %[1]q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + trusted_token_issuer_type = "OIDC_JWT" + + trusted_token_issuer_configuration { + oidc_jwt_configuration { + claim_attribute_path = "email" + identity_store_attribute_path = "emails.value" + issuer_url = "https://example.com" + jwks_retrieval_option = "OPEN_ID_DISCOVERY" + } + } +} +`, rName) +} + +func testAccTrustedTokenIssuerConfigBase_basicUpdated(rNameUpdated, claimAttributePath, identityStoreAttributePath string) string { + return fmt.Sprintf(` +data "aws_ssoadmin_instances" "test" {} + +resource "aws_ssoadmin_trusted_token_issuer" "test" { + name = %[1]q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + trusted_token_issuer_type = "OIDC_JWT" + + trusted_token_issuer_configuration { + oidc_jwt_configuration { + claim_attribute_path = %[2]q + identity_store_attribute_path = %[3]q + issuer_url = "https://example.com" + jwks_retrieval_option = "OPEN_ID_DISCOVERY" + } + } +} +`, rNameUpdated, claimAttributePath, identityStoreAttributePath) +} + +func testAccTrustedTokenIssuerConfigBase_tags(rName, tagKey, tagValue string) string { + return fmt.Sprintf(` +data "aws_ssoadmin_instances" "test" {} + +resource "aws_ssoadmin_trusted_token_issuer" "test" { + name = %[1]q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + trusted_token_issuer_type = "OIDC_JWT" + + trusted_token_issuer_configuration { + oidc_jwt_configuration { + claim_attribute_path = "email" + identity_store_attribute_path = "emails.value" + issuer_url = "https://example.com" + jwks_retrieval_option = "OPEN_ID_DISCOVERY" + } + } + + tags = { + %[2]q = %[3]q + } +} +`, rName, tagKey, 
tagValue) +} + +func testAccTrustedTokenIssuerConfigBase_tags2(rName, tagKey, tagValue, tagKey2, tagValue2 string) string { + return fmt.Sprintf(` +data "aws_ssoadmin_instances" "test" {} + +resource "aws_ssoadmin_trusted_token_issuer" "test" { + name = %[1]q + instance_arn = tolist(data.aws_ssoadmin_instances.test.arns)[0] + trusted_token_issuer_type = "OIDC_JWT" + + trusted_token_issuer_configuration { + oidc_jwt_configuration { + claim_attribute_path = "email" + identity_store_attribute_path = "emails.value" + issuer_url = "https://example.com" + jwks_retrieval_option = "OPEN_ID_DISCOVERY" + } + } + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, tagKey, tagValue, tagKey2, tagValue2) +} diff --git a/website/docs/r/ssoadmin_trusted_token_issuer.html.markdown b/website/docs/r/ssoadmin_trusted_token_issuer.html.markdown new file mode 100644 index 00000000000..1748b9fb15c --- /dev/null +++ b/website/docs/r/ssoadmin_trusted_token_issuer.html.markdown @@ -0,0 +1,84 @@ +--- +subcategory: "SSO Admin" +layout: "aws" +page_title: "AWS: aws_ssoadmin_trusted_token_issuer" +description: |- + Terraform resource for managing an AWS SSO Admin Trusted Token Issuer. +--- +# Resource: aws_ssoadmin_trusted_token_issuer + +Terraform resource for managing an AWS SSO Admin Trusted Token Issuer. + +## Example Usage + +### Basic Usage + +```terraform +data "aws_ssoadmin_instances" "example" {} + +resource "aws_ssoadmin_trusted_token_issuer" "example" { + name = "example" + instance_arn = tolist(data.aws_ssoadmin_instances.example.arns)[0] + trusted_token_issuer_type = "OIDC_JWT" + + trusted_token_issuer_configuration { + oidc_jwt_configuration { + claim_attribute_path = "email" + identity_store_attribute_path = "emails.value" + issuer_url = "https://example.com" + jwks_retrieval_option = "OPEN_ID_DISCOVERY" + } + } +} +``` + +## Argument Reference + +The following arguments are required: + +* `instance_arn` - (Required) ARN of the instance of IAM Identity Center. 
+* `trusted_token_issuer_configuration` - (Required) A block that specifies settings that apply to the trusted token issuer; these change depending on the type you specify in `trusted_token_issuer_type`. [Documented below](#trusted_token_issuer_configuration-argument-reference).
+* `jwks_retrieval_option` - (Required) The method that the trusted token issuer can use to retrieve the JSON Web Key Set used to verify a JWT. Valid values are `OPEN_ID_DISCOVERY` + +## Attribute Reference + +This resource exports the following attributes in addition to the arguments above: + +* `arn` - ARN of the trusted token issuer. +* `id` - ARN of the trusted token issuer. +* `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). + +## Import + +In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import SSO Admin Trusted Token Issuer using the `id`. For example: + +```terraform +import { + to = aws_ssoadmin_trusted_token_issuer.example + id = "arn:aws:sso::012345678901:trustedTokenIssuer/ssoins-lu1ye3gew4mbc7ju/tti-2657c556-9707-11ee-b9d1-0242ac120002" +} +``` + +Using `terraform import`, import SSO Admin Trusted Token Issuer using the `id`. 
For example: + +```console +% terraform import aws_ssoadmin_trusted_token_issuer.example arn:aws:sso::012345678901:trustedTokenIssuer/ssoins-lu1ye3gew4mbc7ju/tti-2657c556-9707-11ee-b9d1-0242ac120002 +``` \ No newline at end of file From f7f3ae06fd0142b42c621a40fa54bd7cb01672eb Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Sun, 10 Dec 2023 04:06:44 +0100 Subject: [PATCH 065/438] chore: added changelog --- .changelog/34839.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/34839.txt diff --git a/.changelog/34839.txt b/.changelog/34839.txt new file mode 100644 index 00000000000..d099346ca28 --- /dev/null +++ b/.changelog/34839.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_ssoadmin_trusted_token_issuer +``` From 72e270e8c841697f065150489e8dca6d812e7636 Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Sun, 10 Dec 2023 04:07:58 +0100 Subject: [PATCH 066/438] chore: comment nit --- internal/service/ssoadmin/trusted_token_issuer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/ssoadmin/trusted_token_issuer.go b/internal/service/ssoadmin/trusted_token_issuer.go index 6f7c88c1b7f..d6642e2e441 100644 --- a/internal/service/ssoadmin/trusted_token_issuer.go +++ b/internal/service/ssoadmin/trusted_token_issuer.go @@ -194,7 +194,7 @@ func resourceTrustedTokenIssuerUpdate(ctx context.Context, d *schema.ResourceDat } } - // listTags requires both trusted token issuer and instance ARN, so must be called + // updateTags requires both trusted token issuer and instance ARN, so must be called // explicitly rather than with transparent tagging. 
if d.HasChange("tags_all") { oldTagsAll, newTagsAll := d.GetChange("tags_all") From 019644bab9eb995c6074e3860c49a8bcb394b45e Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Sun, 10 Dec 2023 04:09:38 +0100 Subject: [PATCH 067/438] chore: formatted docs --- website/docs/r/ssoadmin_trusted_token_issuer.html.markdown | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/website/docs/r/ssoadmin_trusted_token_issuer.html.markdown b/website/docs/r/ssoadmin_trusted_token_issuer.html.markdown index 1748b9fb15c..eb4e58c1b09 100644 --- a/website/docs/r/ssoadmin_trusted_token_issuer.html.markdown +++ b/website/docs/r/ssoadmin_trusted_token_issuer.html.markdown @@ -50,11 +50,10 @@ The following arguments are optional: * `oidc_jwt_configuration` - (Optional) A block that describes the settings for a trusted token issuer that works with OpenID Connect (OIDC) by using JSON Web Tokens (JWT). See [Documented below](#oidc_jwt_configuration-argument-reference) below. - ### `oidc_jwt_configuration` Argument Reference * `claim_attribute_path` - (Required) Specifies the path of the source attribute in the JWT from the trusted token issuer. -* `identity_store_attribute_path` - (Required) Specifies path of the destination attribute in a JWT from IAM Identity Center. The attribute mapped by this JMESPath expression is compared against the attribute mapped by `claim_attribute_path` when a trusted token issuer token is exchanged for an IAM Identity Center token. +* `identity_store_attribute_path` - (Required) Specifies path of the destination attribute in a JWT from IAM Identity Center. The attribute mapped by this JMESPath expression is compared against the attribute mapped by `claim_attribute_path` when a trusted token issuer token is exchanged for an IAM Identity Center token. * `issuer_url` - (Required) Specifies the URL that IAM Identity Center uses for OpenID Discovery. 
OpenID Discovery is used to obtain the information required to verify the tokens that the trusted token issuer generates. * `jwks_retrieval_option` - (Required) The method that the trusted token issuer can use to retrieve the JSON Web Key Set used to verify a JWT. Valid values are `OPEN_ID_DISCOVERY` @@ -81,4 +80,4 @@ Using `terraform import`, import SSO Admin Trusted Token Issuer using the `id`. ```console % terraform import aws_ssoadmin_trusted_token_issuer.example arn:aws:sso::012345678901:trustedTokenIssuer/ssoins-lu1ye3gew4mbc7ju/tti-2657c556-9707-11ee-b9d1-0242ac120002 -``` \ No newline at end of file +``` From bd0522f215cdf4c39462d46930e9c87001090e6e Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Sun, 10 Dec 2023 04:19:49 +0100 Subject: [PATCH 068/438] chore: updated function names semgrep find --- internal/service/ssoadmin/trusted_token_issuer.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/internal/service/ssoadmin/trusted_token_issuer.go b/internal/service/ssoadmin/trusted_token_issuer.go index d6642e2e441..12a84aed6fd 100644 --- a/internal/service/ssoadmin/trusted_token_issuer.go +++ b/internal/service/ssoadmin/trusted_token_issuer.go @@ -277,14 +277,14 @@ func expandTrustedTokenIssuerConfiguration(tfMap []interface{}) types.TrustedTok if v, ok := tfList["oidc_jwt_configuration"]; ok { return &types.TrustedTokenIssuerConfigurationMemberOidcJwtConfiguration{ - Value: expandOidcJwtConfiguration(v.([]interface{})), + Value: expandOIDCJWTConfiguration(v.([]interface{})), } } return nil } -func expandOidcJwtConfiguration(tfMap []interface{}) types.OidcJwtConfiguration { +func expandOIDCJWTConfiguration(tfMap []interface{}) types.OidcJwtConfiguration { apiObject := types.OidcJwtConfiguration{} if len(tfMap) == 0 { @@ -327,14 +327,14 @@ func expandTrustedTokenIssuerUpdateConfiguration(tfMap []interface{}) types.Trus if v, ok := tfList["oidc_jwt_configuration"]; ok { return 
&types.TrustedTokenIssuerUpdateConfigurationMemberOidcJwtConfiguration{ - Value: expandOidcJwtUpdateConfiguration(v.([]interface{})), + Value: expandOIDCJWTUpdateConfiguration(v.([]interface{})), } } return nil } -func expandOidcJwtUpdateConfiguration(tfMap []interface{}) types.OidcJwtUpdateConfiguration { +func expandOIDCJWTUpdateConfiguration(tfMap []interface{}) types.OidcJwtUpdateConfiguration { apiObject := types.OidcJwtUpdateConfiguration{} if len(tfMap) == 0 { @@ -370,7 +370,7 @@ func flattenTrustedTokenIssuerConfiguration(apiObject types.TrustedTokenIssuerCo switch v := apiObject.(type) { case *types.TrustedTokenIssuerConfigurationMemberOidcJwtConfiguration: - tfMap["oidc_jwt_configuration"] = flattenOidcJwtConfiguration(v.Value) + tfMap["oidc_jwt_configuration"] = flattenOIDCJWTConfiguration(v.Value) default: log.Println("union is nil or unknown type") } @@ -378,7 +378,7 @@ func flattenTrustedTokenIssuerConfiguration(apiObject types.TrustedTokenIssuerCo return []interface{}{tfMap} } -func flattenOidcJwtConfiguration(apiObject types.OidcJwtConfiguration) []interface{} { +func flattenOIDCJWTConfiguration(apiObject types.OidcJwtConfiguration) []interface{} { tfMap := map[string]interface{}{} if v := apiObject.ClaimAttributePath; v != nil { From ded38bc97f525b989e1890864796ae79d43f696d Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Sun, 10 Dec 2023 04:30:51 +0100 Subject: [PATCH 069/438] feat: removed hardcoded partition --- internal/service/ssoadmin/trusted_token_issuer.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/service/ssoadmin/trusted_token_issuer.go b/internal/service/ssoadmin/trusted_token_issuer.go index 12a84aed6fd..a32c0bed394 100644 --- a/internal/service/ssoadmin/trusted_token_issuer.go +++ b/internal/service/ssoadmin/trusted_token_issuer.go @@ -150,7 +150,7 @@ func resourceTrustedTokenIssuerRead(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "reading SSO Trusted Token 
Issuer (%s): %s", d.Id(), err) } - instanceARN, _ := TrustedTokenIssuerParseInstanceID(d.Id()) + instanceARN, _ := TrustedTokenIssuerParseInstanceID(meta.(*conns.AWSClient), d.Id()) d.Set("name", output.Name) d.Set("arn", output.TrustedTokenIssuerArn) @@ -255,11 +255,11 @@ func FindTrustedTokenIssuerByARN(ctx context.Context, conn *ssoadmin.Client, tru // Instance ID is not returned by DescribeTrustedTokenIssuer but is needed for schema consistency when importing and tagging. // Instance ID can be extracted from the Trusted Token Issuer ARN -func TrustedTokenIssuerParseInstanceID(id string) (string, error) { +func TrustedTokenIssuerParseInstanceID(conn *conns.AWSClient, id string) (string, error) { parts := strings.Split(id, "/") if len(parts) == 3 && parts[0] != "" && parts[1] != "" && parts[2] != "" { - return fmt.Sprintf("arn:aws:sso:::instance/%s", parts[1]), nil + return fmt.Sprintf("arn:%s:sso:::instance/%s", conn.Partition, parts[1]), nil } return "", fmt.Errorf("unable to determine Instance ID from Trusted Token Issuer ARN: %s", id) From bdfa4f037f1f2bc5df9c836c54ba8a0e7b53c60f Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Sun, 10 Dec 2023 04:34:01 +0100 Subject: [PATCH 070/438] chore: fixed doc --- internal/service/ssoadmin/trusted_token_issuer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/ssoadmin/trusted_token_issuer.go b/internal/service/ssoadmin/trusted_token_issuer.go index a32c0bed394..bc23fe276b3 100644 --- a/internal/service/ssoadmin/trusted_token_issuer.go +++ b/internal/service/ssoadmin/trusted_token_issuer.go @@ -254,7 +254,7 @@ func FindTrustedTokenIssuerByARN(ctx context.Context, conn *ssoadmin.Client, tru } // Instance ID is not returned by DescribeTrustedTokenIssuer but is needed for schema consistency when importing and tagging. -// Instance ID can be extracted from the Trusted Token Issuer ARN +// Instance ID can be extracted from the Trusted Token Issuer ARN. 
func TrustedTokenIssuerParseInstanceID(conn *conns.AWSClient, id string) (string, error) { parts := strings.Split(id, "/") From 7a41aa742f86f0122d49fcaf4460581b4ec2b8c8 Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Sun, 10 Dec 2023 12:54:05 +0100 Subject: [PATCH 071/438] chore: renamed instance ARN --- internal/service/ssoadmin/trusted_token_issuer.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/service/ssoadmin/trusted_token_issuer.go b/internal/service/ssoadmin/trusted_token_issuer.go index bc23fe276b3..6b103f4f3dd 100644 --- a/internal/service/ssoadmin/trusted_token_issuer.go +++ b/internal/service/ssoadmin/trusted_token_issuer.go @@ -150,7 +150,7 @@ func resourceTrustedTokenIssuerRead(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "reading SSO Trusted Token Issuer (%s): %s", d.Id(), err) } - instanceARN, _ := TrustedTokenIssuerParseInstanceID(meta.(*conns.AWSClient), d.Id()) + instanceARN, _ := TrustedTokenIssuerParseInstanceARN(meta.(*conns.AWSClient), d.Id()) d.Set("name", output.Name) d.Set("arn", output.TrustedTokenIssuerArn) @@ -253,16 +253,16 @@ func FindTrustedTokenIssuerByARN(ctx context.Context, conn *ssoadmin.Client, tru return output, nil } -// Instance ID is not returned by DescribeTrustedTokenIssuer but is needed for schema consistency when importing and tagging. -// Instance ID can be extracted from the Trusted Token Issuer ARN. -func TrustedTokenIssuerParseInstanceID(conn *conns.AWSClient, id string) (string, error) { +// Instance ARN is not returned by DescribeTrustedTokenIssuer but is needed for schema consistency when importing and tagging. +// Instance ARN can be extracted from the Trusted Token Issuer ARN. 
+func TrustedTokenIssuerParseInstanceARN(conn *conns.AWSClient, id string) (string, error) { parts := strings.Split(id, "/") if len(parts) == 3 && parts[0] != "" && parts[1] != "" && parts[2] != "" { return fmt.Sprintf("arn:%s:sso:::instance/%s", conn.Partition, parts[1]), nil } - return "", fmt.Errorf("unable to determine Instance ID from Trusted Token Issuer ARN: %s", id) + return "", fmt.Errorf("unable to construct Instance ARN from Trusted Token Issuer ARN: %s", id) } func expandTrustedTokenIssuerConfiguration(tfMap []interface{}) types.TrustedTokenIssuerConfiguration { From 3e1494dc0eb37063f2f5630d0ce40fca2147f786 Mon Sep 17 00:00:00 2001 From: hsiam261 Date: Mon, 11 Dec 2023 03:10:17 +0600 Subject: [PATCH 072/438] Wait for import to become complete during dynamodb table creation if import fails i.e if import doesn't lead to completed status with timeout period or fails due to some error before that, we will throw an exception. --- internal/service/dynamodb/status.go | 15 +++++++++++++++ internal/service/dynamodb/table.go | 7 ++++++- internal/service/dynamodb/wait.go | 17 +++++++++++++++++ 3 files changed, 38 insertions(+), 1 deletion(-) diff --git a/internal/service/dynamodb/status.go b/internal/service/dynamodb/status.go index 4eb294b3641..b6c5d92c88d 100644 --- a/internal/service/dynamodb/status.go +++ b/internal/service/dynamodb/status.go @@ -49,6 +49,21 @@ func statusTable(ctx context.Context, conn *dynamodb.DynamoDB, tableName string) } } +func statusImport(ctx context.Context, conn *dynamodb.DynamoDB, importArn string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + describeImportInput := &dynamodb.DescribeImportInput{ + ImportArn: &importArn, + } + output, err := conn.DescribeImportWithContext(ctx, describeImportInput) + + if err != nil { + return nil, "", err + } + + return output, aws.StringValue(output.ImportTableDescription.ImportStatus), nil + } +} + func statusReplicaUpdate(ctx context.Context, conn *dynamodb.DynamoDB, 
tableName, region string) retry.StateRefreshFunc { return func() (interface{}, string, error) { result, err := conn.DescribeTableWithContext(ctx, &dynamodb.DescribeTableInput{ diff --git a/internal/service/dynamodb/table.go b/internal/service/dynamodb/table.go index 422c1b54202..c61acaf9369 100644 --- a/internal/service/dynamodb/table.go +++ b/internal/service/dynamodb/table.go @@ -602,7 +602,7 @@ func resourceTableCreate(ctx context.Context, d *schema.ResourceData, meta inter input.TableCreationParameters = tcp - _, err := tfresource.RetryWhen(ctx, createTableTimeout, func() (interface{}, error) { + importTableOutput, err := tfresource.RetryWhen(ctx, createTableTimeout, func() (interface{}, error) { return conn.ImportTableWithContext(ctx, input) }, func(err error) (bool, error) { if tfawserr.ErrCodeEquals(err, "ThrottlingException") { @@ -621,6 +621,11 @@ func resourceTableCreate(ctx context.Context, d *schema.ResourceData, meta inter if err != nil { return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionCreating, ResNameTable, tableName, err) } + + importArn := importTableOutput.(*dynamodb.ImportTableOutput).ImportTableDescription.ImportArn + if _, err = waitImportComplete(ctx, conn, *importArn, d.Timeout(schema.TimeoutCreate)); err != nil { + return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionCreating, ResNameTable, d.Id(), err) + } } else { input := &dynamodb.CreateTableInput{ BillingMode: aws.String(d.Get("billing_mode").(string)), diff --git a/internal/service/dynamodb/wait.go b/internal/service/dynamodb/wait.go index ab26a11dae6..b83d49dca5c 100644 --- a/internal/service/dynamodb/wait.go +++ b/internal/service/dynamodb/wait.go @@ -77,6 +77,23 @@ func waitTableActive(ctx context.Context, conn *dynamodb.DynamoDB, tableName str return nil, err } +func waitImportComplete(ctx context.Context, conn *dynamodb.DynamoDB, importArn string, timeout time.Duration) (*dynamodb.DescribeImportOutput, error) { + stateConf := 
IPAM can take ~30 minutes to release allocations, so the 20-minute timeout is not long enough. This PR increases the timeout to 35 minutes.
) _, err := tfresource.RetryUntilNotFound(ctx, timeout, func() (interface{}, error) { return findIPAMPoolAllocationsForVPC(ctx, conn, ipamPoolID, d.Id()) From ee26ecfa4915b0a9de93920a5a5834fd9866991d Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Mon, 11 Dec 2023 19:33:08 +0100 Subject: [PATCH 074/438] chore: migrate to plugin-framework --- internal/service/ssoadmin/exports_test.go | 2 + .../service/ssoadmin/service_package_gen.go | 10 +- .../service/ssoadmin/trusted_token_issuer.go | 601 +++++++++++------- .../ssoadmin/trusted_token_issuer_test.go | 57 +- 4 files changed, 409 insertions(+), 261 deletions(-) diff --git a/internal/service/ssoadmin/exports_test.go b/internal/service/ssoadmin/exports_test.go index 4ab0999ae72..3ff2a1929a1 100644 --- a/internal/service/ssoadmin/exports_test.go +++ b/internal/service/ssoadmin/exports_test.go @@ -8,8 +8,10 @@ var ( ResourceApplication = newResourceApplication ResourceApplicationAssignment = newResourceApplicationAssignment ResourceApplicationAssignmentConfiguration = newResourceApplicationAssignmentConfiguration + ResourceTrustedTokenIssuer = newResourceTrustedTokenIssuer FindApplicationByID = findApplicationByID FindApplicationAssignmentByID = findApplicationAssignmentByID FindApplicationAssignmentConfigurationByID = findApplicationAssignmentConfigurationByID + FindTrustedTokenIssuerByARN = findTrustedTokenIssuerByARN ) diff --git a/internal/service/ssoadmin/service_package_gen.go b/internal/service/ssoadmin/service_package_gen.go index cea26e4b544..6ad0c85e1de 100644 --- a/internal/service/ssoadmin/service_package_gen.go +++ b/internal/service/ssoadmin/service_package_gen.go @@ -48,6 +48,11 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic Factory: newResourceApplicationAssignmentConfiguration, Name: "Application Assignment Configuration", }, + { + Factory: newResourceTrustedTokenIssuer, + Name: "Trusted Token Issuer", + Tags: &types.ServicePackageResourceTags{}, + }, } } @@ -96,11 
+101,6 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka Factory: ResourcePermissionsBoundaryAttachment, TypeName: "aws_ssoadmin_permissions_boundary_attachment", }, - { - Factory: ResourceTrustedTokenIssuer, - TypeName: "aws_ssoadmin_trusted_token_issuer", - Tags: &types.ServicePackageResourceTags{}, - }, } } diff --git a/internal/service/ssoadmin/trusted_token_issuer.go b/internal/service/ssoadmin/trusted_token_issuer.go index 6b103f4f3dd..2b3e17b0d26 100644 --- a/internal/service/ssoadmin/trusted_token_issuer.go +++ b/internal/service/ssoadmin/trusted_token_issuer.go @@ -5,87 +5,121 @@ package ssoadmin import ( "context" + "errors" "fmt" "log" "strings" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ssoadmin" - "github.com/aws/aws-sdk-go-v2/service/ssoadmin/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + awstypes "github.com/aws/aws-sdk-go-v2/service/ssoadmin/types" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" - 
"github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" - "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_ssoadmin_trusted_token_issuer") +// @FrameworkResource(name="Trusted Token Issuer") // @Tags -func ResourceTrustedTokenIssuer() *schema.Resource { - return &schema.Resource{ - CreateWithoutTimeout: resourceTrustedTokenIssuerCreate, - ReadWithoutTimeout: resourceTrustedTokenIssuerRead, - UpdateWithoutTimeout: resourceTrustedTokenIssuerUpdate, - DeleteWithoutTimeout: resourceTrustedTokenIssuerDelete, - - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Schema: map[string]*schema.Schema{ - "arn": { - Type: schema.TypeString, - Computed: true, - }, - "client_token": { - Type: schema.TypeString, - ForceNew: true, +func newResourceTrustedTokenIssuer(_ context.Context) (resource.ResourceWithConfigure, error) { + return &resourceTrustedTokenIssuer{}, nil +} + +const ( + ResNameTrustedTokenIssuer = "Trusted Token Issuer" +) + +type resourceTrustedTokenIssuer struct { + framework.ResourceWithConfigure +} + +func (r *resourceTrustedTokenIssuer) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = "aws_ssoadmin_trusted_token_issuer" +} + +func (r *resourceTrustedTokenIssuer) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "arn": framework.ARNAttributeComputedOnly(), + "client_token": schema.StringAttribute{ 
Optional: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, }, - "instance_arn": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: verify.ValidARN, + "id": framework.IDAttribute(), + "instance_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, }, - "name": { - Type: schema.TypeString, + "name": schema.StringAttribute{ Required: true, }, - "trusted_token_issuer_configuration": { - Type: schema.TypeList, + "trusted_token_issuer_type": schema.StringAttribute{ Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "oidc_jwt_configuration": { - Type: schema.TypeList, - Required: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "claim_attribute_path": { - Type: schema.TypeString, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, + Validators: []validator.String{ + enum.FrameworkValidate[awstypes.TrustedTokenIssuerType](), + }, + }, + + names.AttrTags: tftags.TagsAttribute(), + names.AttrTagsAll: tftags.TagsAttributeComputedOnly(), + }, + Blocks: map[string]schema.Block{ + "trusted_token_issuer_configuration": schema.ListNestedBlock{ + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + listvalidator.IsRequired(), + }, + NestedObject: schema.NestedBlockObject{ + Blocks: map[string]schema.Block{ + "oidc_jwt_configuration": schema.ListNestedBlock{ + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + listvalidator.IsRequired(), + }, + NestedObject: schema.NestedBlockObject{ + Attributes: map[string]schema.Attribute{ + "claim_attribute_path": schema.StringAttribute{ Required: true, }, - "identity_store_attribute_path": { - Type: schema.TypeString, + "identity_store_attribute_path": schema.StringAttribute{ Required: true, }, - "issuer_url": { - Type: 
schema.TypeString, + "issuer_url": schema.StringAttribute{ Required: true, - ForceNew: true, // Not part of OidcJwtUpdateConfiguration struct, have to recreate at change + PlanModifiers: []planmodifier.String{ // Not part of OidcJwtUpdateConfiguration struct, have to recreate at change + stringplanmodifier.RequiresReplace(), + }, }, - "jwks_retrieval_option": { - Type: schema.TypeString, - Required: true, - ValidateDiagFunc: enum.Validate[types.JwksRetrievalOption](), + "jwks_retrieval_option": schema.StringAttribute{ + Required: true, + Validators: []validator.String{ + enum.FrameworkValidate[awstypes.JwksRetrievalOption](), + }, }, }, }, @@ -93,307 +127,406 @@ func ResourceTrustedTokenIssuer() *schema.Resource { }, }, }, - "trusted_token_issuer_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateDiagFunc: enum.Validate[types.TrustedTokenIssuerType](), - }, - names.AttrTags: tftags.TagsSchema(), - names.AttrTagsAll: tftags.TagsSchemaComputed(), }, - - CustomizeDiff: verify.SetTagsDiff, } } -func resourceTrustedTokenIssuerCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SSOAdminClient(ctx) +func (r *resourceTrustedTokenIssuer) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + conn := r.Meta().SSOAdminClient(ctx) - input := &ssoadmin.CreateTrustedTokenIssuerInput{ - InstanceArn: aws.String(d.Get("instance_arn").(string)), - Name: aws.String(d.Get("name").(string)), - TrustedTokenIssuerConfiguration: expandTrustedTokenIssuerConfiguration(d.Get("trusted_token_issuer_configuration").([]interface{})), - TrustedTokenIssuerType: types.TrustedTokenIssuerType(d.Get("trusted_token_issuer_type").(string)), - Tags: getTagsIn(ctx), + var plan resourceTrustedTokenIssuerData + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) 
+ if resp.Diagnostics.HasError() { + return } - if v, ok := d.GetOk("client_token"); ok { - input.ClientToken = aws.String(v.(string)) + in := &ssoadmin.CreateTrustedTokenIssuerInput{ + InstanceArn: aws.String(plan.InstanceARN.ValueString()), + Name: aws.String(plan.Name.ValueString()), + TrustedTokenIssuerType: awstypes.TrustedTokenIssuerType(plan.TrustedTokenIssuerType.ValueString()), + Tags: getTagsIn(ctx), } - output, err := conn.CreateTrustedTokenIssuer(ctx, input) + if !plan.ClientToken.IsNull() { + in.ClientToken = aws.String(plan.ClientToken.ValueString()) + } + if !plan.TrustedTokenIssuerConfiguration.IsNull() { + var tfList []TrustedTokenIssuerConfigurationData + resp.Diagnostics.Append(plan.TrustedTokenIssuerConfiguration.ElementsAs(ctx, &tfList, false)...) + if resp.Diagnostics.HasError() { + return + } + + trustedTokenIssuerConfiguration, d := expandTrustedTokenIssuerConfiguration(ctx, tfList) + resp.Diagnostics.Append(d...) + if resp.Diagnostics.HasError() { + return + } + in.TrustedTokenIssuerConfiguration = trustedTokenIssuerConfiguration + } + + out, err := conn.CreateTrustedTokenIssuer(ctx, in) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating SSO Trusted Token Issuer (%s): %s", d.Get("name").(string), err) + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SSOAdmin, create.ErrActionCreating, ResNameTrustedTokenIssuer, plan.Name.String(), err), + err.Error(), + ) + return } - d.SetId(aws.ToString(output.TrustedTokenIssuerArn)) + if out == nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SSOAdmin, create.ErrActionCreating, ResNameTrustedTokenIssuer, plan.Name.String(), nil), + errors.New("empty output").Error(), + ) + return + } - return append(diags, resourceTrustedTokenIssuerRead(ctx, d, meta)...) 
-} + plan.ARN = flex.StringToFramework(ctx, out.TrustedTokenIssuerArn) + plan.ID = flex.StringToFramework(ctx, out.TrustedTokenIssuerArn) -func resourceTrustedTokenIssuerRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SSOAdminClient(ctx) + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) +} - output, err := FindTrustedTokenIssuerByARN(ctx, conn, d.Id()) +func (r *resourceTrustedTokenIssuer) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + conn := r.Meta().SSOAdminClient(ctx) - if !d.IsNewResource() && tfresource.NotFound(err) { - log.Printf("[WARN] SSO Trusted Token Issuer (%s) not found, removing from state", d.Id()) - d.SetId("") - return diags + var state resourceTrustedTokenIssuerData + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return } + out, err := findTrustedTokenIssuerByARN(ctx, conn, state.ID.ValueString()) + if tfresource.NotFound(err) { + resp.State.RemoveResource(ctx) + return + } if err != nil { - return sdkdiag.AppendErrorf(diags, "reading SSO Trusted Token Issuer (%s): %s", d.Id(), err) + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SSOAdmin, create.ErrActionSetting, ResNameTrustedTokenIssuer, state.ID.String(), err), + err.Error(), + ) + return } - instanceARN, _ := TrustedTokenIssuerParseInstanceARN(meta.(*conns.AWSClient), d.Id()) + instanceARN, _ := TrustedTokenIssuerParseInstanceARN(r.Meta(), aws.ToString(out.TrustedTokenIssuerArn)) + + state.ARN = flex.StringToFramework(ctx, out.TrustedTokenIssuerArn) + state.Name = flex.StringToFramework(ctx, out.Name) + state.ID = flex.StringToFramework(ctx, out.TrustedTokenIssuerArn) + state.InstanceARN = flex.StringToFrameworkARN(ctx, aws.String(instanceARN)) + state.TrustedTokenIssuerType = flex.StringValueToFramework(ctx, out.TrustedTokenIssuerType) - d.Set("name", output.Name) - d.Set("arn", 
output.TrustedTokenIssuerArn) - d.Set("instance_arn", instanceARN) - d.Set("trusted_token_issuer_configuration", flattenTrustedTokenIssuerConfiguration(output.TrustedTokenIssuerConfiguration)) - d.Set("trusted_token_issuer_type", output.TrustedTokenIssuerType) + trustedTokenIssuerConfiguration, d := flattenTrustedTokenIssuerConfiguration(ctx, out.TrustedTokenIssuerConfiguration) + resp.Diagnostics.Append(d...) + state.TrustedTokenIssuerConfiguration = trustedTokenIssuerConfiguration // listTags requires both trusted token issuer and instance ARN, so must be called // explicitly rather than with transparent tagging. - tags, err := listTags(ctx, conn, d.Id(), instanceARN) + tags, err := listTags(ctx, conn, state.ARN.ValueString(), state.InstanceARN.ValueString()) if err != nil { - return sdkdiag.AppendErrorf(diags, "reading SSO Trusted Token Issuer (%s): %s", d.Id(), err) + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SSOAdmin, create.ErrActionSetting, ResNameTrustedTokenIssuer, state.ID.String(), err), + err.Error(), + ) + return } - setTagsOut(ctx, Tags(tags)) - return diags + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) } -func resourceTrustedTokenIssuerUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SSOAdminClient(ctx) +func (r *resourceTrustedTokenIssuer) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + conn := r.Meta().SSOAdminClient(ctx) - if d.HasChangesExcept("tags", "tags_all") { - input := &ssoadmin.UpdateTrustedTokenIssuerInput{ - TrustedTokenIssuerArn: aws.String(d.Id()), - } + var plan, state resourceTrustedTokenIssuerData + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) 
+ if resp.Diagnostics.HasError() { + return + } - if d.HasChange("name") { - input.Name = aws.String(d.Get("name").(string)) + if !plan.Name.Equal(state.Name) || !plan.TrustedTokenIssuerConfiguration.Equal(state.TrustedTokenIssuerConfiguration) { + + in := &ssoadmin.UpdateTrustedTokenIssuerInput{ + TrustedTokenIssuerArn: aws.String(plan.ID.ValueString()), } - if d.HasChange("trusted_token_issuer_configuration") { - input.TrustedTokenIssuerConfiguration = expandTrustedTokenIssuerUpdateConfiguration(d.Get("trusted_token_issuer_configuration").([]interface{})) + if !plan.Name.IsNull() { + in.Name = aws.String(plan.Name.ValueString()) } - _, err := conn.UpdateTrustedTokenIssuer(ctx, input) + if !plan.TrustedTokenIssuerConfiguration.IsNull() { + var tfList []TrustedTokenIssuerConfigurationData + resp.Diagnostics.Append(plan.TrustedTokenIssuerConfiguration.ElementsAs(ctx, &tfList, false)...) + if resp.Diagnostics.HasError() { + return + } + + trustedTokenIssuerUpdateConfiguration, d := expandTrustedTokenIssuerUpdateConfiguration(ctx, tfList) + resp.Diagnostics.Append(d...) 
+ if resp.Diagnostics.HasError() { + return + } + in.TrustedTokenIssuerConfiguration = trustedTokenIssuerUpdateConfiguration + } + out, err := conn.UpdateTrustedTokenIssuer(ctx, in) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating SSO Trusted Token Issuer (%s): %s", d.Id(), err) + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SSOAdmin, create.ErrActionUpdating, ResNameTrustedTokenIssuer, plan.ID.String(), err), + err.Error(), + ) + return + } + if out == nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SSOAdmin, create.ErrActionUpdating, ResNameTrustedTokenIssuer, plan.ID.String(), nil), + errors.New("empty output").Error(), + ) + return } } - // updateTags requires both trusted token issuer and instance ARN, so must be called + // updateTags requires both application and instance ARN, so must be called // explicitly rather than with transparent tagging. - if d.HasChange("tags_all") { - oldTagsAll, newTagsAll := d.GetChange("tags_all") - if err := updateTags(ctx, conn, d.Id(), d.Get("instance_arn").(string), oldTagsAll, newTagsAll); err != nil { - return sdkdiag.AppendErrorf(diags, "updating SSO Trusted Token Issuer (%s): %s", d.Id(), err) + if oldTagsAll, newTagsAll := state.TagsAll, plan.TagsAll; !newTagsAll.Equal(oldTagsAll) { + if err := updateTags(ctx, conn, plan.ID.ValueString(), plan.InstanceARN.ValueString(), oldTagsAll, newTagsAll); err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SSOAdmin, create.ErrActionUpdating, ResNameTrustedTokenIssuer, plan.ID.String(), err), + err.Error(), + ) + return } } - return append(diags, resourceTrustedTokenIssuerRead(ctx, d, meta)...) + resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...) 
} -func resourceTrustedTokenIssuerDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SSOAdminClient(ctx) +func (r *resourceTrustedTokenIssuer) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + conn := r.Meta().SSOAdminClient(ctx) - input := &ssoadmin.DeleteTrustedTokenIssuerInput{ - TrustedTokenIssuerArn: aws.String(d.Id()), + var state resourceTrustedTokenIssuerData + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return } - log.Printf("[INFO] Deleting SSO Trusted Token Issuer: %s", d.Id()) - _, err := conn.DeleteTrustedTokenIssuer(ctx, input) - - if errs.IsA[*types.ResourceNotFoundException](err) { - return diags + in := &ssoadmin.DeleteTrustedTokenIssuerInput{ + TrustedTokenIssuerArn: aws.String(state.ID.ValueString()), } + _, err := conn.DeleteTrustedTokenIssuer(ctx, in) if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting SSO Trusted Token Issuer (%s): %s", d.Id(), err) + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SSOAdmin, create.ErrActionDeleting, ResNameTrustedTokenIssuer, state.ID.String(), err), + err.Error(), + ) + return } - - return diags } -func FindTrustedTokenIssuerByARN(ctx context.Context, conn *ssoadmin.Client, trustedTokenIssuerARN string) (*ssoadmin.DescribeTrustedTokenIssuerOutput, error) { - input := &ssoadmin.DescribeTrustedTokenIssuerInput{ - TrustedTokenIssuerArn: aws.String(trustedTokenIssuerARN), - } +func (r *resourceTrustedTokenIssuer) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) +} - output, err := conn.DescribeTrustedTokenIssuer(ctx, input) +func (r *resourceTrustedTokenIssuer) ModifyPlan(ctx context.Context, req 
resource.ModifyPlanRequest, resp *resource.ModifyPlanResponse) { + r.SetTagsAll(ctx, req, resp) +} - if errs.IsA[*types.ResourceNotFoundException](err) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } +func findTrustedTokenIssuerByARN(ctx context.Context, conn *ssoadmin.Client, arn string) (*ssoadmin.DescribeTrustedTokenIssuerOutput, error) { + in := &ssoadmin.DescribeTrustedTokenIssuerInput{ + TrustedTokenIssuerArn: aws.String(arn), } + out, err := conn.DescribeTrustedTokenIssuer(ctx, in) if err != nil { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: in, + } + } + return nil, err } - if output == nil { - return nil, tfresource.NewEmptyResultError(input) + if out == nil { + return nil, tfresource.NewEmptyResultError(in) } - return output, nil + return out, nil } -// Instance ARN is not returned by DescribeTrustedTokenIssuer but is needed for schema consistency when importing and tagging. -// Instance ARN can be extracted from the Trusted Token Issuer ARN. -func TrustedTokenIssuerParseInstanceARN(conn *conns.AWSClient, id string) (string, error) { - parts := strings.Split(id, "/") +func expandTrustedTokenIssuerConfiguration(ctx context.Context, tfList []TrustedTokenIssuerConfigurationData) (awstypes.TrustedTokenIssuerConfiguration, diag.Diagnostics) { + var diags diag.Diagnostics - if len(parts) == 3 && parts[0] != "" && parts[1] != "" && parts[2] != "" { - return fmt.Sprintf("arn:%s:sso:::instance/%s", conn.Partition, parts[1]), nil + if len(tfList) == 0 { + return nil, diags } + tfObj := tfList[0] - return "", fmt.Errorf("unable to construct Instance ARN from Trusted Token Issuer ARN: %s", id) -} + var OIDCJWTConfigurationData []OIDCJWTConfigurationData + diags.Append(tfObj.OIDCJWTConfiguration.ElementsAs(ctx, &OIDCJWTConfigurationData, false)...) 
-func expandTrustedTokenIssuerConfiguration(tfMap []interface{}) types.TrustedTokenIssuerConfiguration { - if len(tfMap) == 0 { - return nil + apiObject := &awstypes.TrustedTokenIssuerConfigurationMemberOidcJwtConfiguration{ + Value: *expandOIDCJWTConfiguration(OIDCJWTConfigurationData), } - tfList, ok := tfMap[0].(map[string]interface{}) - if !ok { - return nil - } + return apiObject, diags +} - if v, ok := tfList["oidc_jwt_configuration"]; ok { - return &types.TrustedTokenIssuerConfigurationMemberOidcJwtConfiguration{ - Value: expandOIDCJWTConfiguration(v.([]interface{})), - } +func expandOIDCJWTConfiguration(tfList []OIDCJWTConfigurationData) *awstypes.OidcJwtConfiguration { + if len(tfList) == 0 { + return nil } - return nil -} - -func expandOIDCJWTConfiguration(tfMap []interface{}) types.OidcJwtConfiguration { - apiObject := types.OidcJwtConfiguration{} + tfObj := tfList[0] - if len(tfMap) == 0 { - return apiObject + apiObject := &awstypes.OidcJwtConfiguration{ + ClaimAttributePath: aws.String(tfObj.ClaimAttributePath.ValueString()), + IdentityStoreAttributePath: aws.String(tfObj.IdentityStoreAttributePath.ValueString()), + IssuerUrl: aws.String(tfObj.IssuerUrl.ValueString()), + JwksRetrievalOption: awstypes.JwksRetrievalOption(tfObj.JWKSRetrievalOption.ValueString()), } - tfList, ok := tfMap[0].(map[string]interface{}) - if !ok { - return apiObject - } + return apiObject +} - if v, ok := tfList["claim_attribute_path"]; ok { - apiObject.ClaimAttributePath = aws.String(v.(string)) - } +func expandTrustedTokenIssuerUpdateConfiguration(ctx context.Context, tfList []TrustedTokenIssuerConfigurationData) (awstypes.TrustedTokenIssuerUpdateConfiguration, diag.Diagnostics) { + var diags diag.Diagnostics - if v, ok := tfList["identity_store_attribute_path"]; ok { - apiObject.IdentityStoreAttributePath = aws.String(v.(string)) + if len(tfList) == 0 { + return nil, diags } + tfObj := tfList[0] - if v, ok := tfList["issuer_url"]; ok { - apiObject.IssuerUrl = 
aws.String(v.(string)) - } + var OIDCJWTConfigurationData []OIDCJWTConfigurationData + diags.Append(tfObj.OIDCJWTConfiguration.ElementsAs(ctx, &OIDCJWTConfigurationData, false)...) - if v, ok := tfList["jwks_retrieval_option"]; ok { - apiObject.JwksRetrievalOption = types.JwksRetrievalOption(v.(string)) + apiObject := &awstypes.TrustedTokenIssuerUpdateConfigurationMemberOidcJwtConfiguration{ + Value: *expandOIDCJWTUpdateConfiguration(OIDCJWTConfigurationData), } - return apiObject + return apiObject, diags } -func expandTrustedTokenIssuerUpdateConfiguration(tfMap []interface{}) types.TrustedTokenIssuerUpdateConfiguration { - if len(tfMap) == 0 { +func expandOIDCJWTUpdateConfiguration(tfList []OIDCJWTConfigurationData) *awstypes.OidcJwtUpdateConfiguration { + if len(tfList) == 0 { return nil } - tfList, ok := tfMap[0].(map[string]interface{}) - if !ok { - return nil - } + tfObj := tfList[0] - if v, ok := tfList["oidc_jwt_configuration"]; ok { - return &types.TrustedTokenIssuerUpdateConfigurationMemberOidcJwtConfiguration{ - Value: expandOIDCJWTUpdateConfiguration(v.([]interface{})), - } + apiObject := &awstypes.OidcJwtUpdateConfiguration{ + ClaimAttributePath: aws.String(tfObj.ClaimAttributePath.ValueString()), + IdentityStoreAttributePath: aws.String(tfObj.IdentityStoreAttributePath.ValueString()), + JwksRetrievalOption: awstypes.JwksRetrievalOption(tfObj.JWKSRetrievalOption.ValueString()), } - return nil + return apiObject } -func expandOIDCJWTUpdateConfiguration(tfMap []interface{}) types.OidcJwtUpdateConfiguration { - apiObject := types.OidcJwtUpdateConfiguration{} +func flattenTrustedTokenIssuerConfiguration(ctx context.Context, apiObject awstypes.TrustedTokenIssuerConfiguration) (types.List, diag.Diagnostics) { + var diags diag.Diagnostics + elemType := types.ObjectType{AttrTypes: TrustedTokenIssuerConfigurationAttrTypes} - if len(tfMap) == 0 { - return apiObject + if apiObject == nil { + return types.ListNull(elemType), diags } - tfList, ok := 
tfMap[0].(map[string]interface{}) - if !ok { - return apiObject - } + obj := map[string]attr.Value{} - if v, ok := tfList["claim_attribute_path"]; ok { - apiObject.ClaimAttributePath = aws.String(v.(string)) + switch v := apiObject.(type) { + case *awstypes.TrustedTokenIssuerConfigurationMemberOidcJwtConfiguration: + oidcJWTConfiguration, d := flattenOIDCJWTConfiguration(ctx, &v.Value) + obj["oidc_jwt_configuration"] = oidcJWTConfiguration + diags.Append(d...) + default: + log.Println("union is nil or unknown type") } - if v, ok := tfList["identity_store_attribute_path"]; ok { - apiObject.IdentityStoreAttributePath = aws.String(v.(string)) - } + objVal, d := types.ObjectValue(TrustedTokenIssuerConfigurationAttrTypes, obj) + diags.Append(d...) - if v, ok := tfList["jwks_retrieval_option"]; ok { - apiObject.JwksRetrievalOption = types.JwksRetrievalOption(v.(string)) - } + listVal, d := types.ListValue(elemType, []attr.Value{objVal}) + diags.Append(d...) - return apiObject + return listVal, diags } -func flattenTrustedTokenIssuerConfiguration(apiObject types.TrustedTokenIssuerConfiguration) []interface{} { +func flattenOIDCJWTConfiguration(ctx context.Context, apiObject *awstypes.OidcJwtConfiguration) (types.List, diag.Diagnostics) { + var diags diag.Diagnostics + elemType := types.ObjectType{AttrTypes: OIDCJWTConfigurationAttrTypes} + if apiObject == nil { - return nil + return types.ListNull(elemType), diags } - tfMap := map[string]interface{}{} - - switch v := apiObject.(type) { - case *types.TrustedTokenIssuerConfigurationMemberOidcJwtConfiguration: - tfMap["oidc_jwt_configuration"] = flattenOIDCJWTConfiguration(v.Value) - default: - log.Println("union is nil or unknown type") + obj := map[string]attr.Value{ + "claim_attribute_path": flex.StringToFramework(ctx, apiObject.ClaimAttributePath), + "identity_store_attribute_path": flex.StringToFramework(ctx, apiObject.IdentityStoreAttributePath), + "issuer_url": flex.StringToFramework(ctx, apiObject.IssuerUrl), + 
"jwks_retrieval_option": flex.StringValueToFramework(ctx, apiObject.JwksRetrievalOption), } - return []interface{}{tfMap} + objVal, d := types.ObjectValue(OIDCJWTConfigurationAttrTypes, obj) + diags.Append(d...) + + listVal, d := types.ListValue(elemType, []attr.Value{objVal}) + diags.Append(d...) + + return listVal, diags } -func flattenOIDCJWTConfiguration(apiObject types.OidcJwtConfiguration) []interface{} { - tfMap := map[string]interface{}{} +// Instance ARN is not returned by DescribeTrustedTokenIssuer but is needed for schema consistency when importing and tagging. +// Instance ARN can be extracted from the Trusted Token Issuer ARN. +func TrustedTokenIssuerParseInstanceARN(conn *conns.AWSClient, id string) (string, diag.Diagnostics) { + var diags diag.Diagnostics + parts := strings.Split(id, "/") - if v := apiObject.ClaimAttributePath; v != nil { - tfMap["claim_attribute_path"] = aws.ToString(v) + if len(parts) == 3 && parts[0] != "" && parts[1] != "" && parts[2] != "" { + return fmt.Sprintf("arn:%s:sso:::instance/%s", conn.Partition, parts[1]), diags } - if v := apiObject.IdentityStoreAttributePath; v != nil { - tfMap["identity_store_attribute_path"] = aws.ToString(v) - } + return "", diags +} - if v := apiObject.IssuerUrl; v != nil { - tfMap["issuer_url"] = aws.ToString(v) - } +type resourceTrustedTokenIssuerData struct { + ARN types.String `tfsdk:"arn"` + ClientToken types.String `tfsdk:"client_token"` + ID types.String `tfsdk:"id"` + InstanceARN fwtypes.ARN `tfsdk:"instance_arn"` + Name types.String `tfsdk:"name"` + TrustedTokenIssuerConfiguration types.List `tfsdk:"trusted_token_issuer_configuration"` + TrustedTokenIssuerType types.String `tfsdk:"trusted_token_issuer_type"` + Tags types.Map `tfsdk:"tags"` + TagsAll types.Map `tfsdk:"tags_all"` +} + +type TrustedTokenIssuerConfigurationData struct { + OIDCJWTConfiguration types.List `tfsdk:"oidc_jwt_configuration"` +} - tfMap["jwks_retrieval_option"] = string(apiObject.JwksRetrievalOption) +type 
OIDCJWTConfigurationData struct { + ClaimAttributePath types.String `tfsdk:"claim_attribute_path"` + IdentityStoreAttributePath types.String `tfsdk:"identity_store_attribute_path"` + IssuerUrl types.String `tfsdk:"issuer_url"` + JWKSRetrievalOption types.String `tfsdk:"jwks_retrieval_option"` +} + +var TrustedTokenIssuerConfigurationAttrTypes = map[string]attr.Type{ + "oidc_jwt_configuration": types.ListType{ElemType: types.ObjectType{AttrTypes: OIDCJWTConfigurationAttrTypes}}, +} - return []interface{}{tfMap} +var OIDCJWTConfigurationAttrTypes = map[string]attr.Type{ + "claim_attribute_path": types.StringType, + "identity_store_attribute_path": types.StringType, + "issuer_url": types.StringType, + "jwks_retrieval_option": types.StringType, } diff --git a/internal/service/ssoadmin/trusted_token_issuer_test.go b/internal/service/ssoadmin/trusted_token_issuer_test.go index cc666fa438c..010e0b302e1 100644 --- a/internal/service/ssoadmin/trusted_token_issuer_test.go +++ b/internal/service/ssoadmin/trusted_token_issuer_test.go @@ -5,22 +5,26 @@ package ssoadmin_test import ( "context" + "errors" "fmt" "testing" + "github.com/aws/aws-sdk-go-v2/service/ssoadmin" "github.com/aws/aws-sdk-go-v2/service/ssoadmin/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/service/ssoadmin" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + tfssoadmin "github.com/hashicorp/terraform-provider-aws/internal/service/ssoadmin" "github.com/hashicorp/terraform-provider-aws/names" ) func 
TestAccSSOAdminTrustedTokenIssuer_basic(t *testing.T) { ctx := acctest.Context(t) + var application ssoadmin.DescribeTrustedTokenIssuerOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ssoadmin_trusted_token_issuer.test" @@ -33,7 +37,7 @@ func TestAccSSOAdminTrustedTokenIssuer_basic(t *testing.T) { { Config: testAccTrustedTokenIssuerConfigBase_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckTrustedTokenIssuerExists(ctx, resourceName), + testAccCheckTrustedTokenIssuerExists(ctx, resourceName, &application), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "trusted_token_issuer_type", string(types.TrustedTokenIssuerTypeOidcJwt)), resource.TestCheckResourceAttr(resourceName, "trusted_token_issuer_configuration.0.oidc_jwt_configuration.0.claim_attribute_path", "email"), @@ -53,6 +57,7 @@ func TestAccSSOAdminTrustedTokenIssuer_basic(t *testing.T) { func TestAccSSOAdminTrustedTokenIssuer_update(t *testing.T) { ctx := acctest.Context(t) + var application ssoadmin.DescribeTrustedTokenIssuerOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) rNameUpdated := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ssoadmin_trusted_token_issuer.test" @@ -66,7 +71,7 @@ func TestAccSSOAdminTrustedTokenIssuer_update(t *testing.T) { { Config: testAccTrustedTokenIssuerConfigBase_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckTrustedTokenIssuerExists(ctx, resourceName), + testAccCheckTrustedTokenIssuerExists(ctx, resourceName, &application), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "trusted_token_issuer_configuration.0.oidc_jwt_configuration.0.claim_attribute_path", "email"), resource.TestCheckResourceAttr(resourceName, "trusted_token_issuer_configuration.0.oidc_jwt_configuration.0.identity_store_attribute_path", "emails.value"), @@ -80,7 +85,7 @@ func 
TestAccSSOAdminTrustedTokenIssuer_update(t *testing.T) { { Config: testAccTrustedTokenIssuerConfigBase_basicUpdated(rNameUpdated, "name", "userName"), Check: resource.ComposeTestCheckFunc( - testAccCheckTrustedTokenIssuerExists(ctx, resourceName), + testAccCheckTrustedTokenIssuerExists(ctx, resourceName, &application), resource.TestCheckResourceAttr(resourceName, "name", rNameUpdated), resource.TestCheckResourceAttr(resourceName, "trusted_token_issuer_configuration.0.oidc_jwt_configuration.0.claim_attribute_path", "name"), resource.TestCheckResourceAttr(resourceName, "trusted_token_issuer_configuration.0.oidc_jwt_configuration.0.identity_store_attribute_path", "userName"), @@ -92,6 +97,7 @@ func TestAccSSOAdminTrustedTokenIssuer_update(t *testing.T) { func TestAccSSOAdminTrustedTokenIssuer_disappears(t *testing.T) { ctx := acctest.Context(t) + var application ssoadmin.DescribeTrustedTokenIssuerOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ssoadmin_trusted_token_issuer.test" @@ -104,8 +110,8 @@ func TestAccSSOAdminTrustedTokenIssuer_disappears(t *testing.T) { { Config: testAccTrustedTokenIssuerConfigBase_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckTrustedTokenIssuerExists(ctx, resourceName), - acctest.CheckResourceDisappears(ctx, acctest.Provider, ssoadmin.ResourceTrustedTokenIssuer(), resourceName), + testAccCheckTrustedTokenIssuerExists(ctx, resourceName, &application), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfssoadmin.ResourceTrustedTokenIssuer, resourceName), ), ExpectNonEmptyPlan: true, }, @@ -115,6 +121,7 @@ func TestAccSSOAdminTrustedTokenIssuer_disappears(t *testing.T) { func TestAccSSOAdminTrustedTokenIssuer_tags(t *testing.T) { ctx := acctest.Context(t) + var application ssoadmin.DescribeTrustedTokenIssuerOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_ssoadmin_trusted_token_issuer.test" @@ -127,7 +134,7 @@ func 
TestAccSSOAdminTrustedTokenIssuer_tags(t *testing.T) { { Config: testAccTrustedTokenIssuerConfigBase_tags(rName, "key1", "value1"), Check: resource.ComposeTestCheckFunc( - testAccCheckTrustedTokenIssuerExists(ctx, resourceName), + testAccCheckTrustedTokenIssuerExists(ctx, resourceName, &application), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), ), @@ -140,7 +147,7 @@ func TestAccSSOAdminTrustedTokenIssuer_tags(t *testing.T) { { Config: testAccTrustedTokenIssuerConfigBase_tags2(rName, "key1", "value1updated", "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckTrustedTokenIssuerExists(ctx, resourceName), + testAccCheckTrustedTokenIssuerExists(ctx, resourceName, &application), resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), @@ -149,7 +156,7 @@ func TestAccSSOAdminTrustedTokenIssuer_tags(t *testing.T) { { Config: testAccTrustedTokenIssuerConfigBase_tags(rName, "key2", "value2"), Check: resource.ComposeTestCheckFunc( - testAccCheckTrustedTokenIssuerExists(ctx, resourceName), + testAccCheckTrustedTokenIssuerExists(ctx, resourceName, &application), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), ), @@ -158,18 +165,26 @@ func TestAccSSOAdminTrustedTokenIssuer_tags(t *testing.T) { }) } -func testAccCheckTrustedTokenIssuerExists(ctx context.Context, n string) resource.TestCheckFunc { +func testAccCheckTrustedTokenIssuerExists(ctx context.Context, name string, trustedTokenIssuer *ssoadmin.DescribeTrustedTokenIssuerOutput) resource.TestCheckFunc { return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] + rs, ok := s.RootModule().Resources[name] if !ok { - return fmt.Errorf("Not found: %s", n) + return 
create.Error(names.SSOAdmin, create.ErrActionCheckingExistence, tfssoadmin.ResNameTrustedTokenIssuer, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.SSOAdmin, create.ErrActionCheckingExistence, tfssoadmin.ResNameTrustedTokenIssuer, name, errors.New("not set")) } conn := acctest.Provider.Meta().(*conns.AWSClient).SSOAdminClient(ctx) + resp, err := tfssoadmin.FindTrustedTokenIssuerByARN(ctx, conn, rs.Primary.ID) + if err != nil { + return create.Error(names.SSOAdmin, create.ErrActionCheckingExistence, tfssoadmin.ResNameTrustedTokenIssuer, rs.Primary.ID, err) + } - _, err := ssoadmin.FindTrustedTokenIssuerByARN(ctx, conn, rs.Primary.ID) + *trustedTokenIssuer = *resp - return err + return nil } } @@ -182,17 +197,15 @@ func testAccCheckTrustedTokenIssuerDestroy(ctx context.Context) resource.TestChe continue } - _, err := ssoadmin.FindTrustedTokenIssuerByARN(ctx, conn, rs.Primary.ID) - - if tfresource.NotFound(err) { - continue + _, err := tfssoadmin.FindTrustedTokenIssuerByARN(ctx, conn, rs.Primary.ID) + if errs.IsA[*types.ResourceNotFoundException](err) { + return nil } - if err != nil { - return err + return create.Error(names.SSOAdmin, create.ErrActionCheckingDestroyed, tfssoadmin.ResNameTrustedTokenIssuer, rs.Primary.ID, err) } - return fmt.Errorf("SSO Admin Trusted Token Issuer %s still exists", rs.Primary.ID) + return create.Error(names.SSOAdmin, create.ErrActionCheckingDestroyed, tfssoadmin.ResNameTrustedTokenIssuer, rs.Primary.ID, errors.New("not destroyed")) } return nil From 695c458a9ead056a2f17f17d2038b14a2cde1ef4 Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Mon, 11 Dec 2023 19:44:26 +0100 Subject: [PATCH 075/438] chore: fixed fmt --- internal/service/ssoadmin/trusted_token_issuer.go | 1 - 1 file changed, 1 deletion(-) diff --git a/internal/service/ssoadmin/trusted_token_issuer.go b/internal/service/ssoadmin/trusted_token_issuer.go index 2b3e17b0d26..a5c29ad69bf 100644 --- 
a/internal/service/ssoadmin/trusted_token_issuer.go +++ b/internal/service/ssoadmin/trusted_token_issuer.go @@ -249,7 +249,6 @@ func (r *resourceTrustedTokenIssuer) Update(ctx context.Context, req resource.Up } if !plan.Name.Equal(state.Name) || !plan.TrustedTokenIssuerConfiguration.Equal(state.TrustedTokenIssuerConfiguration) { - in := &ssoadmin.UpdateTrustedTokenIssuerInput{ TrustedTokenIssuerArn: aws.String(plan.ID.ValueString()), } From 5cafc44c4579e3642c00ba3682fa8e1f0c1e05f1 Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Tue, 12 Dec 2023 17:46:14 +0100 Subject: [PATCH 076/438] chore: migrate to plugin-framework --- .../ssoadmin/application_access_scope.go | 239 ++++++++++-------- .../ssoadmin/application_access_scope_test.go | 66 ++--- internal/service/ssoadmin/exports_test.go | 2 + .../service/ssoadmin/service_package_gen.go | 8 +- 4 files changed, 178 insertions(+), 137 deletions(-) diff --git a/internal/service/ssoadmin/application_access_scope.go b/internal/service/ssoadmin/application_access_scope.go index 325a6c9d770..7e8dc6e1951 100644 --- a/internal/service/ssoadmin/application_access_scope.go +++ b/internal/service/ssoadmin/application_access_scope.go @@ -5,155 +5,188 @@ package ssoadmin import ( "context" + "errors" "fmt" - "log" "strings" - "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ssoadmin" - "github.com/aws/aws-sdk-go-v2/service/ssoadmin/types" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + awstypes "github.com/aws/aws-sdk-go-v2/service/ssoadmin/types" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" + 
"github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" - "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" - "github.com/hashicorp/terraform-provider-aws/internal/flex" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" - "github.com/hashicorp/terraform-provider-aws/internal/verify" + "github.com/hashicorp/terraform-provider-aws/names" ) -// @SDKResource("aws_ssoadmin_application_access_scope") -func ResourceApplicationAccessScope() *schema.Resource { - return &schema.Resource{ - CreateWithoutTimeout: resourceApplicationAccessScopeCreate, - ReadWithoutTimeout: resourceApplicationAccessScopeRead, - DeleteWithoutTimeout: resourceApplicationAccessScopeDelete, +// @FrameworkResource(name="Application Access Scope") +func newResourceApplicationAccessScope(_ context.Context) (resource.ResourceWithConfigure, error) { + return &resourceApplicationAccessScope{}, nil +} - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, +const ( + ResNameApplicationAccessScope = "Application Access Scope" +) - Timeouts: &schema.ResourceTimeout{ - Create: schema.DefaultTimeout(10 * time.Minute), - Delete: schema.DefaultTimeout(10 * time.Minute), - }, +type resourceApplicationAccessScope struct { + framework.ResourceWithConfigure +} + +func (r *resourceApplicationAccessScope) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) { + resp.TypeName = 
"aws_ssoadmin_application_access_scope" +} - Schema: map[string]*schema.Schema{ - "application_arn": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: verify.ValidARN, +func (r *resourceApplicationAccessScope) Schema(ctx context.Context, req resource.SchemaRequest, resp *resource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "application_arn": schema.StringAttribute{ + CustomType: fwtypes.ARNType, + Required: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, }, - "authorized_targets": { - Type: schema.TypeList, - Optional: true, - ForceNew: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: verify.ValidARN, + "authorized_targets": schema.ListAttribute{ + ElementType: types.StringType, + Optional: true, + PlanModifiers: []planmodifier.List{ + listplanmodifier.RequiresReplace(), }, }, - "scope": { - Type: schema.TypeString, + "id": framework.IDAttribute(), + "scope": schema.StringAttribute{ Required: true, - ForceNew: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.RequiresReplace(), + }, }, }, } } -func resourceApplicationAccessScopeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SSOAdminClient(ctx) +func (r *resourceApplicationAccessScope) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + conn := r.Meta().SSOAdminClient(ctx) - applicationARN := d.Get("application_arn").(string) - scope := d.Get("scope").(string) - id := ApplicationAccessScopeCreateResourceID(applicationARN, scope) - - input := &ssoadmin.PutApplicationAccessScopeInput{ - ApplicationArn: aws.String(applicationARN), - Scope: aws.String(scope), + var plan resourceApplicationAccessScopeData + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) 
+ if resp.Diagnostics.HasError() { + return } - if v, ok := d.GetOk("authorized_targets"); ok { - input.AuthorizedTargets = flex.ExpandStringValueList(v.([]interface{})) + in := &ssoadmin.PutApplicationAccessScopeInput{ + ApplicationArn: aws.String(plan.ApplicationARN.ValueString()), + Scope: aws.String(plan.Scope.ValueString()), } - _, err := conn.PutApplicationAccessScope(ctx, input) + if !plan.AuthorizedTargets.IsNull() { + in.AuthorizedTargets = flex.ExpandFrameworkStringValueList(ctx, plan.AuthorizedTargets) + } + out, err := conn.PutApplicationAccessScope(ctx, in) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating SSO Application Access Scope (%s): %s", id, err) + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SSOAdmin, create.ErrActionCreating, ResNameApplicationAccessScope, plan.ApplicationARN.String(), err), + err.Error(), + ) + return + } + if out == nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SSOAdmin, create.ErrActionCreating, ResNameApplicationAccessScope, plan.ApplicationARN.String(), nil), + errors.New("empty output").Error(), + ) + return } - d.SetId(id) + plan.ID = flex.StringToFramework(ctx, ApplicationAccessScopeCreateResourceID(plan.ApplicationARN.ValueString(), plan.Scope.ValueString())) - return append(diags, resourceApplicationAccessScopeRead(ctx, d, meta)...) + resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) 
} -func resourceApplicationAccessScopeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SSOAdminClient(ctx) +func (r *resourceApplicationAccessScope) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) { + conn := r.Meta().SSOAdminClient(ctx) - applicationARN, scope, err := ApplicationAccessScopeParseResourceID(d.Id()) - if err != nil { - return sdkdiag.AppendFromErr(diags, err) + var state resourceApplicationAccessScopeData + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return } - output, err := FindApplicationAccessScopeByScopeAndApplicationARN(ctx, conn, applicationARN, scope) + applicationARN, scope, err := ApplicationAccessScopeParseResourceID(state.ID.ValueString()) - if !d.IsNewResource() && tfresource.NotFound(err) { - log.Printf("[WARN] SSO Application Access Scope (%s) not found, removing from state", d.Id()) - d.SetId("") - return diags + out, err := findApplicationAccessScopeByID(ctx, conn, applicationARN, scope) + if tfresource.NotFound(err) { + resp.State.RemoveResource(ctx) + return } - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading SSO Application Access Scope (%s): %s", d.Id(), err) + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SSOAdmin, create.ErrActionSetting, ResNameApplicationAccessScope, state.ID.String(), err), + err.Error(), + ) + return } - d.Set("application_arn", applicationARN) - d.Set("scope", output.Scope) - d.Set("authorized_targets", output.AuthorizedTargets) + state.ApplicationARN = flex.StringToFrameworkARN(ctx, aws.String(applicationARN)) + state.AuthorizedTargets = flex.FlattenFrameworkStringValueList(ctx, out.AuthorizedTargets) + state.Scope = flex.StringToFramework(ctx, out.Scope) - return diags + resp.Diagnostics.Append(resp.State.Set(ctx, &state)...) 
} -func resourceApplicationAccessScopeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).SSOAdminClient(ctx) +func (r *resourceApplicationAccessScope) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + //Update is no-op. +} - applicationARN, scope, err := ApplicationAccessScopeParseResourceID(d.Id()) - if err != nil { - return sdkdiag.AppendFromErr(diags, err) +func (r *resourceApplicationAccessScope) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + conn := r.Meta().SSOAdminClient(ctx) + + var state resourceApplicationAccessScopeData + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + if resp.Diagnostics.HasError() { + return } - log.Printf("[INFO] Deleting SSO Application Access Scope: %s", d.Id()) - _, err = conn.DeleteApplicationAccessScope(ctx, &ssoadmin.DeleteApplicationAccessScopeInput{ + applicationARN, scope, err := ApplicationAccessScopeParseResourceID(state.ID.ValueString()) + in := &ssoadmin.DeleteApplicationAccessScopeInput{ ApplicationArn: aws.String(applicationARN), Scope: aws.String(scope), - }) - - if errs.IsA[*types.ResourceNotFoundException](err) { - return diags } + _, err = conn.DeleteApplicationAccessScope(ctx, in) if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting SSO Application Access Scope (%s): %s", d.Id(), err) + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return + } + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SSOAdmin, create.ErrActionDeleting, ResNameApplicationAccessScope, state.ID.String(), err), + err.Error(), + ) + return } +} - return diags +func (r *resourceApplicationAccessScope) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) } const applicationAccessScopeIDSeparator = 
"," -func ApplicationAccessScopeCreateResourceID(applicationARN, scope string) string { +func ApplicationAccessScopeCreateResourceID(applicationARN, scope string) *string { parts := []string{applicationARN, scope} id := strings.Join(parts, applicationAccessScopeIDSeparator) - return id + return &id } func ApplicationAccessScopeParseResourceID(id string) (string, string, error) { @@ -166,28 +199,34 @@ func ApplicationAccessScopeParseResourceID(id string) (string, string, error) { return "", "", fmt.Errorf("unexpected format for ID (%[1]s), expected APPLICATION_ARN%[2]sSCOPE", id, applicationAccessScopeIDSeparator) } -func FindApplicationAccessScopeByScopeAndApplicationARN(ctx context.Context, conn *ssoadmin.Client, applicationARN, scope string) (*ssoadmin.GetApplicationAccessScopeOutput, error) { - input := &ssoadmin.GetApplicationAccessScopeInput{ +func findApplicationAccessScopeByID(ctx context.Context, conn *ssoadmin.Client, applicationARN, scope string) (*ssoadmin.GetApplicationAccessScopeOutput, error) { + in := &ssoadmin.GetApplicationAccessScopeInput{ ApplicationArn: aws.String(applicationARN), Scope: aws.String(scope), } - output, err := conn.GetApplicationAccessScope(ctx, input) - - if errs.IsA[*types.ResourceNotFoundException](err) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, + out, err := conn.GetApplicationAccessScope(ctx, in) + if err != nil { + if errs.IsA[*awstypes.ResourceNotFoundException](err) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: in, + } } - } - if err != nil { return nil, err } - if output == nil { - return nil, tfresource.NewEmptyResultError(input) + if out == nil { + return nil, tfresource.NewEmptyResultError(in) } - return output, nil + return out, nil +} + +type resourceApplicationAccessScopeData struct { + ApplicationARN fwtypes.ARN `tfsdk:"application_arn"` + AuthorizedTargets types.List `tfsdk:"authorized_targets"` + ID types.String `tfsdk:"id"` + Scope types.String 
`tfsdk:"scope"` } diff --git a/internal/service/ssoadmin/application_access_scope_test.go b/internal/service/ssoadmin/application_access_scope_test.go index 534d378ef7f..e49cc40ac31 100644 --- a/internal/service/ssoadmin/application_access_scope_test.go +++ b/internal/service/ssoadmin/application_access_scope_test.go @@ -5,16 +5,19 @@ package ssoadmin_test import ( "context" + "errors" "fmt" "testing" + "github.com/aws/aws-sdk-go-v2/service/ssoadmin/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/service/ssoadmin" - "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs" + tfssoadmin "github.com/hashicorp/terraform-provider-aws/internal/service/ssoadmin" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -63,7 +66,7 @@ func TestAccSSOAdminApplicationAccessScope_disappears(t *testing.T) { Config: testAccApplicationAccessScopeConfig_basic(rName, "sso:account:access"), Check: resource.ComposeTestCheckFunc( testAccCheckApplicationAccessScopeExists(ctx, resourceName), - acctest.CheckResourceDisappears(ctx, acctest.Provider, ssoadmin.ResourceApplicationAccessScope(), resourceName), + acctest.CheckFrameworkResourceDisappears(ctx, acctest.Provider, tfssoadmin.ResourceApplicationAccessScope, resourceName), ), ExpectNonEmptyPlan: true, }, @@ -71,26 +74,6 @@ func TestAccSSOAdminApplicationAccessScope_disappears(t *testing.T) { }) } -func testAccCheckApplicationAccessScopeExists(ctx context.Context, n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := 
s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Not found: %s", n) - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).SSOAdminClient(ctx) - - applicationARN, scope, err := ssoadmin.ApplicationAccessScopeParseResourceID(rs.Primary.ID) - if err != nil { - return err - } - - _, err = ssoadmin.FindApplicationAccessScopeByScopeAndApplicationARN(ctx, conn, applicationARN, scope) - - return err - } -} - func testAccCheckApplicationAccessScopeDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).SSOAdminClient(ctx) @@ -100,22 +83,39 @@ func testAccCheckApplicationAccessScopeDestroy(ctx context.Context) resource.Tes continue } - var applicationARN, scope, err = ssoadmin.ApplicationAccessScopeParseResourceID(rs.Primary.ID) + applicationARN, scope, err := tfssoadmin.ApplicationAccessScopeParseResourceID(rs.Primary.ID) + _, err = tfssoadmin.FindApplicationAccessScopeByID(ctx, conn, applicationARN, scope) + if errs.IsA[*types.ResourceNotFoundException](err) { + return nil + } if err != nil { - return err + return create.Error(names.SSOAdmin, create.ErrActionCheckingDestroyed, tfssoadmin.ResNameApplicationAccessScope, rs.Primary.ID, err) } - _, err = ssoadmin.FindApplicationAccessScopeByScopeAndApplicationARN(ctx, conn, applicationARN, scope) + return create.Error(names.SSOAdmin, create.ErrActionCheckingDestroyed, tfssoadmin.ResNameApplicationAccessScope, rs.Primary.ID, errors.New("not destroyed")) + } + + return nil + } +} - if tfresource.NotFound(err) { - continue - } +func testAccCheckApplicationAccessScopeExists(ctx context.Context, name string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.SSOAdmin, create.ErrActionCheckingExistence, tfssoadmin.ResNameApplicationAccessScope, name, errors.New("not found")) + } - if err != nil { - return err - } + if rs.Primary.ID 
== "" { + return create.Error(names.SSOAdmin, create.ErrActionCheckingExistence, tfssoadmin.ResNameApplicationAccessScope, name, errors.New("not set")) + } - return fmt.Errorf("SSO Application Access Scope %s still exists", rs.Primary.ID) + conn := acctest.Provider.Meta().(*conns.AWSClient).SSOAdminClient(ctx) + + applicationARN, scope, err := tfssoadmin.ApplicationAccessScopeParseResourceID(rs.Primary.ID) + _, err = tfssoadmin.FindApplicationAccessScopeByID(ctx, conn, applicationARN, scope) + if err != nil { + return create.Error(names.SSOAdmin, create.ErrActionCheckingExistence, tfssoadmin.ResNameApplicationAccessScope, rs.Primary.ID, err) } return nil diff --git a/internal/service/ssoadmin/exports_test.go b/internal/service/ssoadmin/exports_test.go index 4ab0999ae72..e76751bccd9 100644 --- a/internal/service/ssoadmin/exports_test.go +++ b/internal/service/ssoadmin/exports_test.go @@ -8,8 +8,10 @@ var ( ResourceApplication = newResourceApplication ResourceApplicationAssignment = newResourceApplicationAssignment ResourceApplicationAssignmentConfiguration = newResourceApplicationAssignmentConfiguration + ResourceApplicationAccessScope = newResourceApplicationAccessScope FindApplicationByID = findApplicationByID FindApplicationAssignmentByID = findApplicationAssignmentByID FindApplicationAssignmentConfigurationByID = findApplicationAssignmentConfigurationByID + FindApplicationAccessScopeByID = findApplicationAccessScopeByID ) diff --git a/internal/service/ssoadmin/service_package_gen.go b/internal/service/ssoadmin/service_package_gen.go index 839d43a4be2..78214b260bf 100644 --- a/internal/service/ssoadmin/service_package_gen.go +++ b/internal/service/ssoadmin/service_package_gen.go @@ -32,6 +32,10 @@ func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.Servic Name: "Application", Tags: &types.ServicePackageResourceTags{}, }, + { + Factory: newResourceApplicationAccessScope, + Name: "Application Access Scope", + }, { Factory: 
newResourceApplicationAssignment, Name: "Application Assignment", @@ -62,10 +66,6 @@ func (p *servicePackage) SDKResources(ctx context.Context) []*types.ServicePacka Factory: ResourceAccountAssignment, TypeName: "aws_ssoadmin_account_assignment", }, - { - Factory: ResourceApplicationAccessScope, - TypeName: "aws_ssoadmin_application_access_scope", - }, { Factory: ResourceCustomerManagedPolicyAttachment, TypeName: "aws_ssoadmin_customer_managed_policy_attachment", From 8daa56f681dbd4aec5f0235291a10db4d3b0b4fa Mon Sep 17 00:00:00 2001 From: Daniel Rieske Date: Tue, 12 Dec 2023 18:36:42 +0100 Subject: [PATCH 077/438] feat: remove ineffective assignments --- internal/service/ssoadmin/application_access_scope.go | 6 +++--- .../service/ssoadmin/application_access_scope_test.go | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/internal/service/ssoadmin/application_access_scope.go b/internal/service/ssoadmin/application_access_scope.go index 7e8dc6e1951..7999ca4134f 100644 --- a/internal/service/ssoadmin/application_access_scope.go +++ b/internal/service/ssoadmin/application_access_scope.go @@ -122,7 +122,7 @@ func (r *resourceApplicationAccessScope) Read(ctx context.Context, req resource. 
return } - applicationARN, scope, err := ApplicationAccessScopeParseResourceID(state.ID.ValueString()) + applicationARN, scope, _ := ApplicationAccessScopeParseResourceID(state.ID.ValueString()) out, err := findApplicationAccessScopeByID(ctx, conn, applicationARN, scope) if tfresource.NotFound(err) { @@ -157,13 +157,13 @@ func (r *resourceApplicationAccessScope) Delete(ctx context.Context, req resourc return } - applicationARN, scope, err := ApplicationAccessScopeParseResourceID(state.ID.ValueString()) + applicationARN, scope, _ := ApplicationAccessScopeParseResourceID(state.ID.ValueString()) in := &ssoadmin.DeleteApplicationAccessScopeInput{ ApplicationArn: aws.String(applicationARN), Scope: aws.String(scope), } - _, err = conn.DeleteApplicationAccessScope(ctx, in) + _, err := conn.DeleteApplicationAccessScope(ctx, in) if err != nil { if errs.IsA[*awstypes.ResourceNotFoundException](err) { return diff --git a/internal/service/ssoadmin/application_access_scope_test.go b/internal/service/ssoadmin/application_access_scope_test.go index e49cc40ac31..65a9bd11ad5 100644 --- a/internal/service/ssoadmin/application_access_scope_test.go +++ b/internal/service/ssoadmin/application_access_scope_test.go @@ -83,8 +83,8 @@ func testAccCheckApplicationAccessScopeDestroy(ctx context.Context) resource.Tes continue } - applicationARN, scope, err := tfssoadmin.ApplicationAccessScopeParseResourceID(rs.Primary.ID) - _, err = tfssoadmin.FindApplicationAccessScopeByID(ctx, conn, applicationARN, scope) + applicationARN, scope, _ := tfssoadmin.ApplicationAccessScopeParseResourceID(rs.Primary.ID) + _, err := tfssoadmin.FindApplicationAccessScopeByID(ctx, conn, applicationARN, scope) if errs.IsA[*types.ResourceNotFoundException](err) { return nil } @@ -112,8 +112,8 @@ func testAccCheckApplicationAccessScopeExists(ctx context.Context, name string) conn := acctest.Provider.Meta().(*conns.AWSClient).SSOAdminClient(ctx) - applicationARN, scope, err := 
tfssoadmin.ApplicationAccessScopeParseResourceID(rs.Primary.ID) - _, err = tfssoadmin.FindApplicationAccessScopeByID(ctx, conn, applicationARN, scope) + applicationARN, scope, _ := tfssoadmin.ApplicationAccessScopeParseResourceID(rs.Primary.ID) + _, err := tfssoadmin.FindApplicationAccessScopeByID(ctx, conn, applicationARN, scope) if err != nil { return create.Error(names.SSOAdmin, create.ErrActionCheckingExistence, tfssoadmin.ResNameApplicationAccessScope, rs.Primary.ID, err) } From de56404dfe0e2265f7a796e9d5d66641aec6938f Mon Sep 17 00:00:00 2001 From: Moritz Megerle Date: Tue, 12 Dec 2023 19:12:24 +0100 Subject: [PATCH 078/438] feat: match errorCode only --- internal/service/s3/bucket.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 37d4a0c4244..7a238751c7a 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -31,6 +31,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" @@ -1116,7 +1117,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf return diags } - if err != nil && !tfawserr.ErrMessageContains(err, errCodeServerSideEncryptionConfigurationNotFound, "encryption configuration was not found") { + if err != nil && !tfawserr.ErrCodeEquals(err, errCodeServerSideEncryptionConfigurationNotFound) { return sdkdiag.AppendErrorf(diags, "getting S3 Bucket encryption: %s", err) } From 32c2c5ee0f9d5feea95a3199874c699f1549ad3a Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Dec 2023 13:20:53 -0500 Subject: [PATCH 079/438] Document default value of 's3_us_east_1_regional_endpoint'. 
--- website/docs/index.html.markdown | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index b2cab4d50eb..91c25eb8f71 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -68,7 +68,7 @@ which are applied in the following order: 1. Shared credentials files 1. Shared configuration files 1. Container credentials -1. Instance profile credentials and region +1. Instance profile credentials and Region This order matches the precedence used by the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html#cli-configure-quickstart-precedence) @@ -110,7 +110,7 @@ Other settings related to authorization can be configured, such as: ### Environment Variables Credentials can be provided by using the `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and optionally `AWS_SESSION_TOKEN` environment variables. -The region can be set using the `AWS_REGION` or `AWS_DEFAULT_REGION` environment variables. +The Region can be set using the `AWS_REGION` or `AWS_DEFAULT_REGION` environment variables. For example: @@ -160,7 +160,7 @@ If you're running Terraform on CodeBuild or ECS and have configured an [IAM Task If you're running Terraform on EKS and have configured [IAM Roles for Service Accounts (IRSA)](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html), Terraform can use the pod's role. This support is based on the underlying `AWS_ROLE_ARN` and `AWS_WEB_IDENTITY_TOKEN_FILE` environment variables being automatically set by Kubernetes or manually for advanced usage. 
-### Instance profile credentials and region +### Instance profile credentials and Region When the AWS Provider is running on an EC2 instance with an IAM Instance Profile set, the provider can source credentials from the [EC2 Instance Metadata Service](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#instance-metadata-security-credentials). @@ -336,18 +336,19 @@ In addition to [generic `provider` arguments](https://www.terraform.io/docs/conf Can also be set using the `NO_PROXY` or `no_proxy` environment variables. * `profile` - (Optional) AWS profile name as set in the shared configuration and credentials files. Can also be set using either the environment variables `AWS_PROFILE` or `AWS_DEFAULT_PROFILE`. -* `region` - (Optional) AWS region where the provider will operate. The region must be set. +* `region` - (Optional) AWS Region where the provider will operate. The Region must be set. Can also be set with either the `AWS_REGION` or `AWS_DEFAULT_REGION` environment variables, or via a shared config file parameter `region` if `profile` is used. - If credentials are retrieved from the EC2 Instance Metadata Service, the region can also be retrieved from the metadata. + If credentials are retrieved from the EC2 Instance Metadata Service, the Region can also be retrieved from the metadata. * `retry_mode` - (Optional) Specifies how retries are attempted. Valid values are `standard` and `adaptive`. Can also be configured using the `AWS_RETRY_MODE` environment variable or the shared config file parameter `retry_mode`. * `s3_use_path_style` - (Optional) Whether to enable the request to use path-style addressing, i.e., `https://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will use virtual hosted bucket addressing, `https://BUCKET.s3.amazonaws.com/KEY`, when possible. Specific to the Amazon S3 service. 
-* `s3_us_east_1_regional_endpoint` - (Optional) Specifies whether S3 API calls in the `us-east-1` region use the legacy global endpoint or a regional endpoint. +* `s3_us_east_1_regional_endpoint` - (Optional) Specifies whether S3 API calls in the `us-east-1` Region use the legacy global endpoint or a regional endpoint. Valid values are `legacy` or `regional`. + If omitted, the default behavior is to use the global endpoint in the `us-east-1` Region. Can also be configured using the `AWS_S3_US_EAST_1_REGIONAL_ENDPOINT` environment variable or the `s3_us_east_1_regional_endpoint` shared config file parameter. Specific to the Amazon S3 service. * `secret_key` - (Optional) AWS secret key. Can also be set with the `AWS_SECRET_ACCESS_KEY` environment variable, or via a shared configuration and credentials files if `profile` is used. See also `access_key`. @@ -355,7 +356,7 @@ In addition to [generic `provider` arguments](https://www.terraform.io/docs/conf * `shared_credentials_files` - (Optional) List of paths to the shared credentials file. If not set and a profile is used, the default value is `[~/.aws/credentials]`. A single value can also be set with the `AWS_SHARED_CREDENTIALS_FILE` environment variable. * `skip_credentials_validation` - (Optional) Whether to skip credentials validation via the STS API. This can be useful for testing and for AWS API implementations that do not have STS available. * `skip_metadata_api_check` - (Optional) Whether to skip the AWS Metadata API check. Useful for AWS API implementations that do not have a metadata API endpoint. Setting to `true` prevents Terraform from authenticating via the Metadata API. You may need to use other authentication methods like static credentials, configuration variables, or environment variables. -* `skip_region_validation` - (Optional) Whether to skip validating the region. 
Useful for AWS-like implementations that use their own region names or to bypass the validation for regions that aren't publicly available yet. +* `skip_region_validation` - (Optional) Whether to skip validating the Region. Useful for AWS-like implementations that use their own Region names or to bypass the validation for Regions that aren't publicly available yet. * `skip_requesting_account_id` - (Optional) Whether to skip requesting the account ID. Useful for AWS API implementations that do not have the IAM, STS API, or metadata API. When set to `true` and not determined previously, returns an empty account ID when manually constructing ARN attributes with the following: - [`aws_api_gateway_deployment` resource](/docs/providers/aws/r/api_gateway_deployment.html) - [`aws_api_gateway_rest_api` resource](/docs/providers/aws/r/api_gateway_rest_api.html) @@ -472,7 +473,7 @@ In addition to [generic `provider` arguments](https://www.terraform.io/docs/conf - [`aws_waf_size_constraint_set` resource](/docs/providers/aws/r/waf_size_constraint_set.html) - [`aws_waf_web_acl` resource](/docs/providers/aws/r/waf_web_acl.html) - [`aws_waf_xss_match_set` resource](/docs/providers/aws/r/waf_xss_match_set.html) -* `sts_region` - (Optional) AWS region for STS. If unset, AWS will use the same region for STS as other non-STS operations. +* `sts_region` - (Optional) AWS Region for STS. If unset, AWS will use the same Region for STS as other non-STS operations. * `token` - (Optional) Session token for validating temporary credentials. Typically provided after successful identity federation or Multi-Factor Authentication (MFA) login. With MFA login, this is the session token provided afterward, not the 6 digit MFA code used to get temporary credentials. Can also be set with the `AWS_SESSION_TOKEN` environment variable. * `use_dualstack_endpoint` - (Optional) Force the provider to resolve endpoints with DualStack capability. 
Can also be set with the `AWS_USE_DUALSTACK_ENDPOINT` environment variable or in a shared config file (`use_dualstack_endpoint`). * `use_fips_endpoint` - (Optional) Force the provider to resolve endpoints with FIPS capability. Can also be set with the `AWS_USE_FIPS_ENDPOINT` environment variable or in a shared config file (`use_fips_endpoint`). From 14c79420b1fce634809279094426f99dff2b3cf1 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Dec 2023 13:51:26 -0500 Subject: [PATCH 080/438] r/aws_s3_directory_bucket: Use regional endpoint is us-east-1 in acceptance tests. --- internal/service/s3/directory_bucket_test.go | 4 ++-- internal/service/s3/exports_test.go | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/internal/service/s3/directory_bucket_test.go b/internal/service/s3/directory_bucket_test.go index a8a3cc53cb0..14ff2a70924 100644 --- a/internal/service/s3/directory_bucket_test.go +++ b/internal/service/s3/directory_bucket_test.go @@ -106,7 +106,7 @@ func testAccCheckDirectoryBucketDestroy(ctx context.Context) resource.TestCheckF continue } - err := tfs3.FindBucket(ctx, conn, rs.Primary.ID) + err := tfs3.FindBucket(ctx, conn, rs.Primary.ID, tfs3.UseRegionalEndpointInUSEast1) if tfresource.NotFound(err) { continue @@ -132,7 +132,7 @@ func testAccCheckDirectoryBucketExists(ctx context.Context, n string) resource.T conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) - return tfs3.FindBucket(ctx, conn, rs.Primary.ID) + return tfs3.FindBucket(ctx, conn, rs.Primary.ID, tfs3.UseRegionalEndpointInUSEast1) } } diff --git a/internal/service/s3/exports_test.go b/internal/service/s3/exports_test.go index a8073d65d0d..cb4f49bc65f 100644 --- a/internal/service/s3/exports_test.go +++ b/internal/service/s3/exports_test.go @@ -31,6 +31,7 @@ var ( FindReplicationConfiguration = findReplicationConfiguration FindServerSideEncryptionConfiguration = findServerSideEncryptionConfiguration SDKv1CompatibleCleanKey = sdkv1CompatibleCleanKey + 
UseRegionalEndpointInUSEast1 = useRegionalEndpointInUSEast1 ErrCodeNoSuchCORSConfiguration = errCodeNoSuchCORSConfiguration LifecycleRuleStatusDisabled = lifecycleRuleStatusDisabled From 550e3a8b3dd1a1dfc3dc736397e78873c7022daf Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Dec 2023 14:26:01 -0500 Subject: [PATCH 081/438] s3: Add 'isDirectoryBucket'. --- internal/service/s3/directory_bucket.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/service/s3/directory_bucket.go b/internal/service/s3/directory_bucket.go index a1485758423..9b4dc02dc70 100644 --- a/internal/service/s3/directory_bucket.go +++ b/internal/service/s3/directory_bucket.go @@ -33,6 +33,10 @@ var ( directoryBucketNameRegex = regexache.MustCompile(`^([0-9a-z.-]+)--([a-z]+\d+-az\d+)--x-s3$`) ) +func isDirectoryBucket(bucket string) bool { + return directoryBucketNameRegex.MatchString(bucket) +} + // @FrameworkResource(name="Directory Bucket") func newDirectoryBucketResource(context.Context) (resource.ResourceWithConfigure, error) { r := &directoryBucketResource{} From 31fa5eff2138b214ea1062cb0c9905b0c77547ca Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Dec 2023 14:27:28 -0500 Subject: [PATCH 082/438] Add 'AWSClient.S3ExpressClient()'. 
--- internal/conns/awsclient.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/internal/conns/awsclient.go b/internal/conns/awsclient.go index 05e58080b59..3b8882b845d 100644 --- a/internal/conns/awsclient.go +++ b/internal/conns/awsclient.go @@ -11,6 +11,7 @@ import ( "sync" aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" + s3_sdkv2 "github.com/aws/aws-sdk-go-v2/service/s3" endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" apigatewayv2_sdkv1 "github.com/aws/aws-sdk-go/service/apigatewayv2" @@ -40,6 +41,7 @@ type AWSClient struct { httpClient *http.Client lock sync.Mutex logger baselogging.Logger + s3ExpressClient *s3_sdkv2.Client s3UsePathStyle bool // From provider configuration. s3UsEast1RegionalEndpoint endpoints_sdkv1.S3UsEast1RegionalEndpoint // From provider configuration. stsRegion string // From provider configuration. @@ -71,6 +73,13 @@ func (client *AWSClient) RegionalHostname(prefix string) string { return fmt.Sprintf("%s.%s.%s", prefix, client.Region, client.DNSSuffix) } +// S3ExpressClient returns an S3 API client suitable for use with S3 Express (directory buckets). +// This client differs from the standard S3 API client only in us-east-1 if the global S3 endpoint is used. +// In that case the returned client uses the regional S3 endpoint. +func (client *AWSClient) S3ExpressClient(ctx context.Context) *s3_sdkv2.Client { + return client.s3ExpressClient +} + // S3UsePathStyle returns the s3_force_path_style provider configuration value. func (client *AWSClient) S3UsePathStyle() bool { return client.s3UsePathStyle From 270c7f94173b3c61299605b400fb8ca2b7feb883 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Dec 2023 14:31:33 -0500 Subject: [PATCH 083/438] Add 'extra' to AWS SDK for Go v1 API client factory. 
--- internal/conns/awsclient.go | 2 +- internal/conns/awsclient_gen.go | 270 +++++++++++++------------- internal/generate/awsclient/file.tmpl | 2 +- 3 files changed, 137 insertions(+), 137 deletions(-) diff --git a/internal/conns/awsclient.go b/internal/conns/awsclient.go index 3b8882b845d..885cd6e5c74 100644 --- a/internal/conns/awsclient.go +++ b/internal/conns/awsclient.go @@ -187,7 +187,7 @@ func (client *AWSClient) apiClientConfig(servicePackageName string) map[string]a } // conn returns the AWS SDK for Go v1 API client for the specified service. -func conn[T any](ctx context.Context, c *AWSClient, servicePackageName string) (T, error) { +func conn[T any](ctx context.Context, c *AWSClient, servicePackageName string, extra map[string]any) (T, error) { c.lock.Lock() defer c.lock.Unlock() diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index a4fdea6ff88..89963e5e535 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -237,19 +237,19 @@ func (c *AWSClient) ACMClient(ctx context.Context) *acm_sdkv2.Client { } func (c *AWSClient) ACMPCAConn(ctx context.Context) *acmpca_sdkv1.ACMPCA { - return errs.Must(conn[*acmpca_sdkv1.ACMPCA](ctx, c, names.ACMPCA)) + return errs.Must(conn[*acmpca_sdkv1.ACMPCA](ctx, c, names.ACMPCA, make(map[string]any))) } func (c *AWSClient) AMPConn(ctx context.Context) *prometheusservice_sdkv1.PrometheusService { - return errs.Must(conn[*prometheusservice_sdkv1.PrometheusService](ctx, c, names.AMP)) + return errs.Must(conn[*prometheusservice_sdkv1.PrometheusService](ctx, c, names.AMP, make(map[string]any))) } func (c *AWSClient) APIGatewayConn(ctx context.Context) *apigateway_sdkv1.APIGateway { - return errs.Must(conn[*apigateway_sdkv1.APIGateway](ctx, c, names.APIGateway)) + return errs.Must(conn[*apigateway_sdkv1.APIGateway](ctx, c, names.APIGateway, make(map[string]any))) } func (c *AWSClient) APIGatewayV2Conn(ctx context.Context) *apigatewayv2_sdkv1.ApiGatewayV2 { - return 
errs.Must(conn[*apigatewayv2_sdkv1.ApiGatewayV2](ctx, c, names.APIGatewayV2)) + return errs.Must(conn[*apigatewayv2_sdkv1.ApiGatewayV2](ctx, c, names.APIGatewayV2, make(map[string]any))) } func (c *AWSClient) AccessAnalyzerClient(ctx context.Context) *accessanalyzer_sdkv2.Client { @@ -261,15 +261,15 @@ func (c *AWSClient) AccountClient(ctx context.Context) *account_sdkv2.Client { } func (c *AWSClient) AmplifyConn(ctx context.Context) *amplify_sdkv1.Amplify { - return errs.Must(conn[*amplify_sdkv1.Amplify](ctx, c, names.Amplify)) + return errs.Must(conn[*amplify_sdkv1.Amplify](ctx, c, names.Amplify, make(map[string]any))) } func (c *AWSClient) AppAutoScalingConn(ctx context.Context) *applicationautoscaling_sdkv1.ApplicationAutoScaling { - return errs.Must(conn[*applicationautoscaling_sdkv1.ApplicationAutoScaling](ctx, c, names.AppAutoScaling)) + return errs.Must(conn[*applicationautoscaling_sdkv1.ApplicationAutoScaling](ctx, c, names.AppAutoScaling, make(map[string]any))) } func (c *AWSClient) AppConfigConn(ctx context.Context) *appconfig_sdkv1.AppConfig { - return errs.Must(conn[*appconfig_sdkv1.AppConfig](ctx, c, names.AppConfig)) + return errs.Must(conn[*appconfig_sdkv1.AppConfig](ctx, c, names.AppConfig, make(map[string]any))) } func (c *AWSClient) AppConfigClient(ctx context.Context) *appconfig_sdkv2.Client { @@ -285,11 +285,11 @@ func (c *AWSClient) AppFlowClient(ctx context.Context) *appflow_sdkv2.Client { } func (c *AWSClient) AppIntegrationsConn(ctx context.Context) *appintegrationsservice_sdkv1.AppIntegrationsService { - return errs.Must(conn[*appintegrationsservice_sdkv1.AppIntegrationsService](ctx, c, names.AppIntegrations)) + return errs.Must(conn[*appintegrationsservice_sdkv1.AppIntegrationsService](ctx, c, names.AppIntegrations, make(map[string]any))) } func (c *AWSClient) AppMeshConn(ctx context.Context) *appmesh_sdkv1.AppMesh { - return errs.Must(conn[*appmesh_sdkv1.AppMesh](ctx, c, names.AppMesh)) + return 
errs.Must(conn[*appmesh_sdkv1.AppMesh](ctx, c, names.AppMesh, make(map[string]any))) } func (c *AWSClient) AppRunnerClient(ctx context.Context) *apprunner_sdkv2.Client { @@ -297,15 +297,15 @@ func (c *AWSClient) AppRunnerClient(ctx context.Context) *apprunner_sdkv2.Client } func (c *AWSClient) AppStreamConn(ctx context.Context) *appstream_sdkv1.AppStream { - return errs.Must(conn[*appstream_sdkv1.AppStream](ctx, c, names.AppStream)) + return errs.Must(conn[*appstream_sdkv1.AppStream](ctx, c, names.AppStream, make(map[string]any))) } func (c *AWSClient) AppSyncConn(ctx context.Context) *appsync_sdkv1.AppSync { - return errs.Must(conn[*appsync_sdkv1.AppSync](ctx, c, names.AppSync)) + return errs.Must(conn[*appsync_sdkv1.AppSync](ctx, c, names.AppSync, make(map[string]any))) } func (c *AWSClient) ApplicationInsightsConn(ctx context.Context) *applicationinsights_sdkv1.ApplicationInsights { - return errs.Must(conn[*applicationinsights_sdkv1.ApplicationInsights](ctx, c, names.ApplicationInsights)) + return errs.Must(conn[*applicationinsights_sdkv1.ApplicationInsights](ctx, c, names.ApplicationInsights, make(map[string]any))) } func (c *AWSClient) AthenaClient(ctx context.Context) *athena_sdkv2.Client { @@ -317,19 +317,19 @@ func (c *AWSClient) AuditManagerClient(ctx context.Context) *auditmanager_sdkv2. 
} func (c *AWSClient) AutoScalingConn(ctx context.Context) *autoscaling_sdkv1.AutoScaling { - return errs.Must(conn[*autoscaling_sdkv1.AutoScaling](ctx, c, names.AutoScaling)) + return errs.Must(conn[*autoscaling_sdkv1.AutoScaling](ctx, c, names.AutoScaling, make(map[string]any))) } func (c *AWSClient) AutoScalingPlansConn(ctx context.Context) *autoscalingplans_sdkv1.AutoScalingPlans { - return errs.Must(conn[*autoscalingplans_sdkv1.AutoScalingPlans](ctx, c, names.AutoScalingPlans)) + return errs.Must(conn[*autoscalingplans_sdkv1.AutoScalingPlans](ctx, c, names.AutoScalingPlans, make(map[string]any))) } func (c *AWSClient) BackupConn(ctx context.Context) *backup_sdkv1.Backup { - return errs.Must(conn[*backup_sdkv1.Backup](ctx, c, names.Backup)) + return errs.Must(conn[*backup_sdkv1.Backup](ctx, c, names.Backup, make(map[string]any))) } func (c *AWSClient) BatchConn(ctx context.Context) *batch_sdkv1.Batch { - return errs.Must(conn[*batch_sdkv1.Batch](ctx, c, names.Batch)) + return errs.Must(conn[*batch_sdkv1.Batch](ctx, c, names.Batch, make(map[string]any))) } func (c *AWSClient) BedrockClient(ctx context.Context) *bedrock_sdkv2.Client { @@ -337,19 +337,19 @@ func (c *AWSClient) BedrockClient(ctx context.Context) *bedrock_sdkv2.Client { } func (c *AWSClient) BudgetsConn(ctx context.Context) *budgets_sdkv1.Budgets { - return errs.Must(conn[*budgets_sdkv1.Budgets](ctx, c, names.Budgets)) + return errs.Must(conn[*budgets_sdkv1.Budgets](ctx, c, names.Budgets, make(map[string]any))) } func (c *AWSClient) CEConn(ctx context.Context) *costexplorer_sdkv1.CostExplorer { - return errs.Must(conn[*costexplorer_sdkv1.CostExplorer](ctx, c, names.CE)) + return errs.Must(conn[*costexplorer_sdkv1.CostExplorer](ctx, c, names.CE, make(map[string]any))) } func (c *AWSClient) CURConn(ctx context.Context) *costandusagereportservice_sdkv1.CostandUsageReportService { - return errs.Must(conn[*costandusagereportservice_sdkv1.CostandUsageReportService](ctx, c, names.CUR)) + return 
errs.Must(conn[*costandusagereportservice_sdkv1.CostandUsageReportService](ctx, c, names.CUR, make(map[string]any))) } func (c *AWSClient) ChimeConn(ctx context.Context) *chime_sdkv1.Chime { - return errs.Must(conn[*chime_sdkv1.Chime](ctx, c, names.Chime)) + return errs.Must(conn[*chime_sdkv1.Chime](ctx, c, names.Chime, make(map[string]any))) } func (c *AWSClient) ChimeSDKMediaPipelinesClient(ctx context.Context) *chimesdkmediapipelines_sdkv2.Client { @@ -365,7 +365,7 @@ func (c *AWSClient) CleanRoomsClient(ctx context.Context) *cleanrooms_sdkv2.Clie } func (c *AWSClient) Cloud9Conn(ctx context.Context) *cloud9_sdkv1.Cloud9 { - return errs.Must(conn[*cloud9_sdkv1.Cloud9](ctx, c, names.Cloud9)) + return errs.Must(conn[*cloud9_sdkv1.Cloud9](ctx, c, names.Cloud9, make(map[string]any))) } func (c *AWSClient) CloudControlClient(ctx context.Context) *cloudcontrol_sdkv2.Client { @@ -373,35 +373,35 @@ func (c *AWSClient) CloudControlClient(ctx context.Context) *cloudcontrol_sdkv2. } func (c *AWSClient) CloudFormationConn(ctx context.Context) *cloudformation_sdkv1.CloudFormation { - return errs.Must(conn[*cloudformation_sdkv1.CloudFormation](ctx, c, names.CloudFormation)) + return errs.Must(conn[*cloudformation_sdkv1.CloudFormation](ctx, c, names.CloudFormation, make(map[string]any))) } func (c *AWSClient) CloudFrontConn(ctx context.Context) *cloudfront_sdkv1.CloudFront { - return errs.Must(conn[*cloudfront_sdkv1.CloudFront](ctx, c, names.CloudFront)) + return errs.Must(conn[*cloudfront_sdkv1.CloudFront](ctx, c, names.CloudFront, make(map[string]any))) } func (c *AWSClient) CloudHSMV2Conn(ctx context.Context) *cloudhsmv2_sdkv1.CloudHSMV2 { - return errs.Must(conn[*cloudhsmv2_sdkv1.CloudHSMV2](ctx, c, names.CloudHSMV2)) + return errs.Must(conn[*cloudhsmv2_sdkv1.CloudHSMV2](ctx, c, names.CloudHSMV2, make(map[string]any))) } func (c *AWSClient) CloudSearchConn(ctx context.Context) *cloudsearch_sdkv1.CloudSearch { - return errs.Must(conn[*cloudsearch_sdkv1.CloudSearch](ctx, c, 
names.CloudSearch)) + return errs.Must(conn[*cloudsearch_sdkv1.CloudSearch](ctx, c, names.CloudSearch, make(map[string]any))) } func (c *AWSClient) CloudTrailConn(ctx context.Context) *cloudtrail_sdkv1.CloudTrail { - return errs.Must(conn[*cloudtrail_sdkv1.CloudTrail](ctx, c, names.CloudTrail)) + return errs.Must(conn[*cloudtrail_sdkv1.CloudTrail](ctx, c, names.CloudTrail, make(map[string]any))) } func (c *AWSClient) CloudWatchConn(ctx context.Context) *cloudwatch_sdkv1.CloudWatch { - return errs.Must(conn[*cloudwatch_sdkv1.CloudWatch](ctx, c, names.CloudWatch)) + return errs.Must(conn[*cloudwatch_sdkv1.CloudWatch](ctx, c, names.CloudWatch, make(map[string]any))) } func (c *AWSClient) CodeArtifactConn(ctx context.Context) *codeartifact_sdkv1.CodeArtifact { - return errs.Must(conn[*codeartifact_sdkv1.CodeArtifact](ctx, c, names.CodeArtifact)) + return errs.Must(conn[*codeartifact_sdkv1.CodeArtifact](ctx, c, names.CodeArtifact, make(map[string]any))) } func (c *AWSClient) CodeBuildConn(ctx context.Context) *codebuild_sdkv1.CodeBuild { - return errs.Must(conn[*codebuild_sdkv1.CodeBuild](ctx, c, names.CodeBuild)) + return errs.Must(conn[*codebuild_sdkv1.CodeBuild](ctx, c, names.CodeBuild, make(map[string]any))) } func (c *AWSClient) CodeCatalystClient(ctx context.Context) *codecatalyst_sdkv2.Client { @@ -409,7 +409,7 @@ func (c *AWSClient) CodeCatalystClient(ctx context.Context) *codecatalyst_sdkv2. 
} func (c *AWSClient) CodeCommitConn(ctx context.Context) *codecommit_sdkv1.CodeCommit { - return errs.Must(conn[*codecommit_sdkv1.CodeCommit](ctx, c, names.CodeCommit)) + return errs.Must(conn[*codecommit_sdkv1.CodeCommit](ctx, c, names.CodeCommit, make(map[string]any))) } func (c *AWSClient) CodeGuruProfilerClient(ctx context.Context) *codeguruprofiler_sdkv2.Client { @@ -417,11 +417,11 @@ func (c *AWSClient) CodeGuruProfilerClient(ctx context.Context) *codeguruprofile } func (c *AWSClient) CodeGuruReviewerConn(ctx context.Context) *codegurureviewer_sdkv1.CodeGuruReviewer { - return errs.Must(conn[*codegurureviewer_sdkv1.CodeGuruReviewer](ctx, c, names.CodeGuruReviewer)) + return errs.Must(conn[*codegurureviewer_sdkv1.CodeGuruReviewer](ctx, c, names.CodeGuruReviewer, make(map[string]any))) } func (c *AWSClient) CodePipelineConn(ctx context.Context) *codepipeline_sdkv1.CodePipeline { - return errs.Must(conn[*codepipeline_sdkv1.CodePipeline](ctx, c, names.CodePipeline)) + return errs.Must(conn[*codepipeline_sdkv1.CodePipeline](ctx, c, names.CodePipeline, make(map[string]any))) } func (c *AWSClient) CodeStarConnectionsClient(ctx context.Context) *codestarconnections_sdkv2.Client { @@ -433,11 +433,11 @@ func (c *AWSClient) CodeStarNotificationsClient(ctx context.Context) *codestarno } func (c *AWSClient) CognitoIDPConn(ctx context.Context) *cognitoidentityprovider_sdkv1.CognitoIdentityProvider { - return errs.Must(conn[*cognitoidentityprovider_sdkv1.CognitoIdentityProvider](ctx, c, names.CognitoIDP)) + return errs.Must(conn[*cognitoidentityprovider_sdkv1.CognitoIdentityProvider](ctx, c, names.CognitoIDP, make(map[string]any))) } func (c *AWSClient) CognitoIdentityConn(ctx context.Context) *cognitoidentity_sdkv1.CognitoIdentity { - return errs.Must(conn[*cognitoidentity_sdkv1.CognitoIdentity](ctx, c, names.CognitoIdentity)) + return errs.Must(conn[*cognitoidentity_sdkv1.CognitoIdentity](ctx, c, names.CognitoIdentity, make(map[string]any))) } func (c *AWSClient) 
ComprehendClient(ctx context.Context) *comprehend_sdkv2.Client { @@ -449,11 +449,11 @@ func (c *AWSClient) ComputeOptimizerClient(ctx context.Context) *computeoptimize } func (c *AWSClient) ConfigServiceConn(ctx context.Context) *configservice_sdkv1.ConfigService { - return errs.Must(conn[*configservice_sdkv1.ConfigService](ctx, c, names.ConfigService)) + return errs.Must(conn[*configservice_sdkv1.ConfigService](ctx, c, names.ConfigService, make(map[string]any))) } func (c *AWSClient) ConnectConn(ctx context.Context) *connect_sdkv1.Connect { - return errs.Must(conn[*connect_sdkv1.Connect](ctx, c, names.Connect)) + return errs.Must(conn[*connect_sdkv1.Connect](ctx, c, names.Connect, make(map[string]any))) } func (c *AWSClient) ConnectCasesClient(ctx context.Context) *connectcases_sdkv2.Client { @@ -469,19 +469,19 @@ func (c *AWSClient) CustomerProfilesClient(ctx context.Context) *customerprofile } func (c *AWSClient) DAXConn(ctx context.Context) *dax_sdkv1.DAX { - return errs.Must(conn[*dax_sdkv1.DAX](ctx, c, names.DAX)) + return errs.Must(conn[*dax_sdkv1.DAX](ctx, c, names.DAX, make(map[string]any))) } func (c *AWSClient) DLMConn(ctx context.Context) *dlm_sdkv1.DLM { - return errs.Must(conn[*dlm_sdkv1.DLM](ctx, c, names.DLM)) + return errs.Must(conn[*dlm_sdkv1.DLM](ctx, c, names.DLM, make(map[string]any))) } func (c *AWSClient) DMSConn(ctx context.Context) *databasemigrationservice_sdkv1.DatabaseMigrationService { - return errs.Must(conn[*databasemigrationservice_sdkv1.DatabaseMigrationService](ctx, c, names.DMS)) + return errs.Must(conn[*databasemigrationservice_sdkv1.DatabaseMigrationService](ctx, c, names.DMS, make(map[string]any))) } func (c *AWSClient) DSConn(ctx context.Context) *directoryservice_sdkv1.DirectoryService { - return errs.Must(conn[*directoryservice_sdkv1.DirectoryService](ctx, c, names.DS)) + return errs.Must(conn[*directoryservice_sdkv1.DirectoryService](ctx, c, names.DS, make(map[string]any))) } func (c *AWSClient) DSClient(ctx 
context.Context) *directoryservice_sdkv2.Client { @@ -489,15 +489,15 @@ func (c *AWSClient) DSClient(ctx context.Context) *directoryservice_sdkv2.Client } func (c *AWSClient) DataExchangeConn(ctx context.Context) *dataexchange_sdkv1.DataExchange { - return errs.Must(conn[*dataexchange_sdkv1.DataExchange](ctx, c, names.DataExchange)) + return errs.Must(conn[*dataexchange_sdkv1.DataExchange](ctx, c, names.DataExchange, make(map[string]any))) } func (c *AWSClient) DataPipelineConn(ctx context.Context) *datapipeline_sdkv1.DataPipeline { - return errs.Must(conn[*datapipeline_sdkv1.DataPipeline](ctx, c, names.DataPipeline)) + return errs.Must(conn[*datapipeline_sdkv1.DataPipeline](ctx, c, names.DataPipeline, make(map[string]any))) } func (c *AWSClient) DataSyncConn(ctx context.Context) *datasync_sdkv1.DataSync { - return errs.Must(conn[*datasync_sdkv1.DataSync](ctx, c, names.DataSync)) + return errs.Must(conn[*datasync_sdkv1.DataSync](ctx, c, names.DataSync, make(map[string]any))) } func (c *AWSClient) DeployClient(ctx context.Context) *codedeploy_sdkv2.Client { @@ -505,19 +505,19 @@ func (c *AWSClient) DeployClient(ctx context.Context) *codedeploy_sdkv2.Client { } func (c *AWSClient) DetectiveConn(ctx context.Context) *detective_sdkv1.Detective { - return errs.Must(conn[*detective_sdkv1.Detective](ctx, c, names.Detective)) + return errs.Must(conn[*detective_sdkv1.Detective](ctx, c, names.Detective, make(map[string]any))) } func (c *AWSClient) DeviceFarmConn(ctx context.Context) *devicefarm_sdkv1.DeviceFarm { - return errs.Must(conn[*devicefarm_sdkv1.DeviceFarm](ctx, c, names.DeviceFarm)) + return errs.Must(conn[*devicefarm_sdkv1.DeviceFarm](ctx, c, names.DeviceFarm, make(map[string]any))) } func (c *AWSClient) DirectConnectConn(ctx context.Context) *directconnect_sdkv1.DirectConnect { - return errs.Must(conn[*directconnect_sdkv1.DirectConnect](ctx, c, names.DirectConnect)) + return errs.Must(conn[*directconnect_sdkv1.DirectConnect](ctx, c, names.DirectConnect, 
make(map[string]any))) } func (c *AWSClient) DocDBConn(ctx context.Context) *docdb_sdkv1.DocDB { - return errs.Must(conn[*docdb_sdkv1.DocDB](ctx, c, names.DocDB)) + return errs.Must(conn[*docdb_sdkv1.DocDB](ctx, c, names.DocDB, make(map[string]any))) } func (c *AWSClient) DocDBElasticClient(ctx context.Context) *docdbelastic_sdkv2.Client { @@ -525,11 +525,11 @@ func (c *AWSClient) DocDBElasticClient(ctx context.Context) *docdbelastic_sdkv2. } func (c *AWSClient) DynamoDBConn(ctx context.Context) *dynamodb_sdkv1.DynamoDB { - return errs.Must(conn[*dynamodb_sdkv1.DynamoDB](ctx, c, names.DynamoDB)) + return errs.Must(conn[*dynamodb_sdkv1.DynamoDB](ctx, c, names.DynamoDB, make(map[string]any))) } func (c *AWSClient) EC2Conn(ctx context.Context) *ec2_sdkv1.EC2 { - return errs.Must(conn[*ec2_sdkv1.EC2](ctx, c, names.EC2)) + return errs.Must(conn[*ec2_sdkv1.EC2](ctx, c, names.EC2, make(map[string]any))) } func (c *AWSClient) EC2Client(ctx context.Context) *ec2_sdkv2.Client { @@ -537,7 +537,7 @@ func (c *AWSClient) EC2Client(ctx context.Context) *ec2_sdkv2.Client { } func (c *AWSClient) ECRConn(ctx context.Context) *ecr_sdkv1.ECR { - return errs.Must(conn[*ecr_sdkv1.ECR](ctx, c, names.ECR)) + return errs.Must(conn[*ecr_sdkv1.ECR](ctx, c, names.ECR, make(map[string]any))) } func (c *AWSClient) ECRClient(ctx context.Context) *ecr_sdkv2.Client { @@ -545,15 +545,15 @@ func (c *AWSClient) ECRClient(ctx context.Context) *ecr_sdkv2.Client { } func (c *AWSClient) ECRPublicConn(ctx context.Context) *ecrpublic_sdkv1.ECRPublic { - return errs.Must(conn[*ecrpublic_sdkv1.ECRPublic](ctx, c, names.ECRPublic)) + return errs.Must(conn[*ecrpublic_sdkv1.ECRPublic](ctx, c, names.ECRPublic, make(map[string]any))) } func (c *AWSClient) ECSConn(ctx context.Context) *ecs_sdkv1.ECS { - return errs.Must(conn[*ecs_sdkv1.ECS](ctx, c, names.ECS)) + return errs.Must(conn[*ecs_sdkv1.ECS](ctx, c, names.ECS, make(map[string]any))) } func (c *AWSClient) EFSConn(ctx context.Context) *efs_sdkv1.EFS { - 
return errs.Must(conn[*efs_sdkv1.EFS](ctx, c, names.EFS)) + return errs.Must(conn[*efs_sdkv1.EFS](ctx, c, names.EFS, make(map[string]any))) } func (c *AWSClient) EKSClient(ctx context.Context) *eks_sdkv2.Client { @@ -561,15 +561,15 @@ func (c *AWSClient) EKSClient(ctx context.Context) *eks_sdkv2.Client { } func (c *AWSClient) ELBConn(ctx context.Context) *elb_sdkv1.ELB { - return errs.Must(conn[*elb_sdkv1.ELB](ctx, c, names.ELB)) + return errs.Must(conn[*elb_sdkv1.ELB](ctx, c, names.ELB, make(map[string]any))) } func (c *AWSClient) ELBV2Conn(ctx context.Context) *elbv2_sdkv1.ELBV2 { - return errs.Must(conn[*elbv2_sdkv1.ELBV2](ctx, c, names.ELBV2)) + return errs.Must(conn[*elbv2_sdkv1.ELBV2](ctx, c, names.ELBV2, make(map[string]any))) } func (c *AWSClient) EMRConn(ctx context.Context) *emr_sdkv1.EMR { - return errs.Must(conn[*emr_sdkv1.EMR](ctx, c, names.EMR)) + return errs.Must(conn[*emr_sdkv1.EMR](ctx, c, names.EMR, make(map[string]any))) } func (c *AWSClient) EMRClient(ctx context.Context) *emr_sdkv2.Client { @@ -577,7 +577,7 @@ func (c *AWSClient) EMRClient(ctx context.Context) *emr_sdkv2.Client { } func (c *AWSClient) EMRContainersConn(ctx context.Context) *emrcontainers_sdkv1.EMRContainers { - return errs.Must(conn[*emrcontainers_sdkv1.EMRContainers](ctx, c, names.EMRContainers)) + return errs.Must(conn[*emrcontainers_sdkv1.EMRContainers](ctx, c, names.EMRContainers, make(map[string]any))) } func (c *AWSClient) EMRServerlessClient(ctx context.Context) *emrserverless_sdkv2.Client { @@ -585,23 +585,23 @@ func (c *AWSClient) EMRServerlessClient(ctx context.Context) *emrserverless_sdkv } func (c *AWSClient) ElastiCacheConn(ctx context.Context) *elasticache_sdkv1.ElastiCache { - return errs.Must(conn[*elasticache_sdkv1.ElastiCache](ctx, c, names.ElastiCache)) + return errs.Must(conn[*elasticache_sdkv1.ElastiCache](ctx, c, names.ElastiCache, make(map[string]any))) } func (c *AWSClient) ElasticBeanstalkConn(ctx context.Context) 
*elasticbeanstalk_sdkv1.ElasticBeanstalk { - return errs.Must(conn[*elasticbeanstalk_sdkv1.ElasticBeanstalk](ctx, c, names.ElasticBeanstalk)) + return errs.Must(conn[*elasticbeanstalk_sdkv1.ElasticBeanstalk](ctx, c, names.ElasticBeanstalk, make(map[string]any))) } func (c *AWSClient) ElasticTranscoderConn(ctx context.Context) *elastictranscoder_sdkv1.ElasticTranscoder { - return errs.Must(conn[*elastictranscoder_sdkv1.ElasticTranscoder](ctx, c, names.ElasticTranscoder)) + return errs.Must(conn[*elastictranscoder_sdkv1.ElasticTranscoder](ctx, c, names.ElasticTranscoder, make(map[string]any))) } func (c *AWSClient) ElasticsearchConn(ctx context.Context) *elasticsearchservice_sdkv1.ElasticsearchService { - return errs.Must(conn[*elasticsearchservice_sdkv1.ElasticsearchService](ctx, c, names.Elasticsearch)) + return errs.Must(conn[*elasticsearchservice_sdkv1.ElasticsearchService](ctx, c, names.Elasticsearch, make(map[string]any))) } func (c *AWSClient) EventsConn(ctx context.Context) *eventbridge_sdkv1.EventBridge { - return errs.Must(conn[*eventbridge_sdkv1.EventBridge](ctx, c, names.Events)) + return errs.Must(conn[*eventbridge_sdkv1.EventBridge](ctx, c, names.Events, make(map[string]any))) } func (c *AWSClient) EvidentlyClient(ctx context.Context) *evidently_sdkv2.Client { @@ -613,11 +613,11 @@ func (c *AWSClient) FISClient(ctx context.Context) *fis_sdkv2.Client { } func (c *AWSClient) FMSConn(ctx context.Context) *fms_sdkv1.FMS { - return errs.Must(conn[*fms_sdkv1.FMS](ctx, c, names.FMS)) + return errs.Must(conn[*fms_sdkv1.FMS](ctx, c, names.FMS, make(map[string]any))) } func (c *AWSClient) FSxConn(ctx context.Context) *fsx_sdkv1.FSx { - return errs.Must(conn[*fsx_sdkv1.FSx](ctx, c, names.FSx)) + return errs.Must(conn[*fsx_sdkv1.FSx](ctx, c, names.FSx, make(map[string]any))) } func (c *AWSClient) FinSpaceClient(ctx context.Context) *finspace_sdkv2.Client { @@ -625,11 +625,11 @@ func (c *AWSClient) FinSpaceClient(ctx context.Context) *finspace_sdkv2.Client { } func 
(c *AWSClient) FirehoseConn(ctx context.Context) *firehose_sdkv1.Firehose { - return errs.Must(conn[*firehose_sdkv1.Firehose](ctx, c, names.Firehose)) + return errs.Must(conn[*firehose_sdkv1.Firehose](ctx, c, names.Firehose, make(map[string]any))) } func (c *AWSClient) GameLiftConn(ctx context.Context) *gamelift_sdkv1.GameLift { - return errs.Must(conn[*gamelift_sdkv1.GameLift](ctx, c, names.GameLift)) + return errs.Must(conn[*gamelift_sdkv1.GameLift](ctx, c, names.GameLift, make(map[string]any))) } func (c *AWSClient) GlacierClient(ctx context.Context) *glacier_sdkv2.Client { @@ -637,23 +637,23 @@ func (c *AWSClient) GlacierClient(ctx context.Context) *glacier_sdkv2.Client { } func (c *AWSClient) GlobalAcceleratorConn(ctx context.Context) *globalaccelerator_sdkv1.GlobalAccelerator { - return errs.Must(conn[*globalaccelerator_sdkv1.GlobalAccelerator](ctx, c, names.GlobalAccelerator)) + return errs.Must(conn[*globalaccelerator_sdkv1.GlobalAccelerator](ctx, c, names.GlobalAccelerator, make(map[string]any))) } func (c *AWSClient) GlueConn(ctx context.Context) *glue_sdkv1.Glue { - return errs.Must(conn[*glue_sdkv1.Glue](ctx, c, names.Glue)) + return errs.Must(conn[*glue_sdkv1.Glue](ctx, c, names.Glue, make(map[string]any))) } func (c *AWSClient) GrafanaConn(ctx context.Context) *managedgrafana_sdkv1.ManagedGrafana { - return errs.Must(conn[*managedgrafana_sdkv1.ManagedGrafana](ctx, c, names.Grafana)) + return errs.Must(conn[*managedgrafana_sdkv1.ManagedGrafana](ctx, c, names.Grafana, make(map[string]any))) } func (c *AWSClient) GreengrassConn(ctx context.Context) *greengrass_sdkv1.Greengrass { - return errs.Must(conn[*greengrass_sdkv1.Greengrass](ctx, c, names.Greengrass)) + return errs.Must(conn[*greengrass_sdkv1.Greengrass](ctx, c, names.Greengrass, make(map[string]any))) } func (c *AWSClient) GuardDutyConn(ctx context.Context) *guardduty_sdkv1.GuardDuty { - return errs.Must(conn[*guardduty_sdkv1.GuardDuty](ctx, c, names.GuardDuty)) + return 
errs.Must(conn[*guardduty_sdkv1.GuardDuty](ctx, c, names.GuardDuty, make(map[string]any))) } func (c *AWSClient) HealthLakeClient(ctx context.Context) *healthlake_sdkv2.Client { @@ -661,11 +661,11 @@ func (c *AWSClient) HealthLakeClient(ctx context.Context) *healthlake_sdkv2.Clie } func (c *AWSClient) IAMConn(ctx context.Context) *iam_sdkv1.IAM { - return errs.Must(conn[*iam_sdkv1.IAM](ctx, c, names.IAM)) + return errs.Must(conn[*iam_sdkv1.IAM](ctx, c, names.IAM, make(map[string]any))) } func (c *AWSClient) IVSConn(ctx context.Context) *ivs_sdkv1.IVS { - return errs.Must(conn[*ivs_sdkv1.IVS](ctx, c, names.IVS)) + return errs.Must(conn[*ivs_sdkv1.IVS](ctx, c, names.IVS, make(map[string]any))) } func (c *AWSClient) IVSChatClient(ctx context.Context) *ivschat_sdkv2.Client { @@ -677,11 +677,11 @@ func (c *AWSClient) IdentityStoreClient(ctx context.Context) *identitystore_sdkv } func (c *AWSClient) ImageBuilderConn(ctx context.Context) *imagebuilder_sdkv1.Imagebuilder { - return errs.Must(conn[*imagebuilder_sdkv1.Imagebuilder](ctx, c, names.ImageBuilder)) + return errs.Must(conn[*imagebuilder_sdkv1.Imagebuilder](ctx, c, names.ImageBuilder, make(map[string]any))) } func (c *AWSClient) InspectorConn(ctx context.Context) *inspector_sdkv1.Inspector { - return errs.Must(conn[*inspector_sdkv1.Inspector](ctx, c, names.Inspector)) + return errs.Must(conn[*inspector_sdkv1.Inspector](ctx, c, names.Inspector, make(map[string]any))) } func (c *AWSClient) Inspector2Client(ctx context.Context) *inspector2_sdkv2.Client { @@ -693,23 +693,23 @@ func (c *AWSClient) InternetMonitorClient(ctx context.Context) *internetmonitor_ } func (c *AWSClient) IoTConn(ctx context.Context) *iot_sdkv1.IoT { - return errs.Must(conn[*iot_sdkv1.IoT](ctx, c, names.IoT)) + return errs.Must(conn[*iot_sdkv1.IoT](ctx, c, names.IoT, make(map[string]any))) } func (c *AWSClient) IoTAnalyticsConn(ctx context.Context) *iotanalytics_sdkv1.IoTAnalytics { - return errs.Must(conn[*iotanalytics_sdkv1.IoTAnalytics](ctx, 
c, names.IoTAnalytics)) + return errs.Must(conn[*iotanalytics_sdkv1.IoTAnalytics](ctx, c, names.IoTAnalytics, make(map[string]any))) } func (c *AWSClient) IoTEventsConn(ctx context.Context) *iotevents_sdkv1.IoTEvents { - return errs.Must(conn[*iotevents_sdkv1.IoTEvents](ctx, c, names.IoTEvents)) + return errs.Must(conn[*iotevents_sdkv1.IoTEvents](ctx, c, names.IoTEvents, make(map[string]any))) } func (c *AWSClient) KMSConn(ctx context.Context) *kms_sdkv1.KMS { - return errs.Must(conn[*kms_sdkv1.KMS](ctx, c, names.KMS)) + return errs.Must(conn[*kms_sdkv1.KMS](ctx, c, names.KMS, make(map[string]any))) } func (c *AWSClient) KafkaConn(ctx context.Context) *kafka_sdkv1.Kafka { - return errs.Must(conn[*kafka_sdkv1.Kafka](ctx, c, names.Kafka)) + return errs.Must(conn[*kafka_sdkv1.Kafka](ctx, c, names.Kafka, make(map[string]any))) } func (c *AWSClient) KafkaClient(ctx context.Context) *kafka_sdkv2.Client { @@ -717,7 +717,7 @@ func (c *AWSClient) KafkaClient(ctx context.Context) *kafka_sdkv2.Client { } func (c *AWSClient) KafkaConnectConn(ctx context.Context) *kafkaconnect_sdkv1.KafkaConnect { - return errs.Must(conn[*kafkaconnect_sdkv1.KafkaConnect](ctx, c, names.KafkaConnect)) + return errs.Must(conn[*kafkaconnect_sdkv1.KafkaConnect](ctx, c, names.KafkaConnect, make(map[string]any))) } func (c *AWSClient) KendraClient(ctx context.Context) *kendra_sdkv2.Client { @@ -729,27 +729,27 @@ func (c *AWSClient) KeyspacesClient(ctx context.Context) *keyspaces_sdkv2.Client } func (c *AWSClient) KinesisConn(ctx context.Context) *kinesis_sdkv1.Kinesis { - return errs.Must(conn[*kinesis_sdkv1.Kinesis](ctx, c, names.Kinesis)) + return errs.Must(conn[*kinesis_sdkv1.Kinesis](ctx, c, names.Kinesis, make(map[string]any))) } func (c *AWSClient) KinesisAnalyticsConn(ctx context.Context) *kinesisanalytics_sdkv1.KinesisAnalytics { - return errs.Must(conn[*kinesisanalytics_sdkv1.KinesisAnalytics](ctx, c, names.KinesisAnalytics)) + return 
errs.Must(conn[*kinesisanalytics_sdkv1.KinesisAnalytics](ctx, c, names.KinesisAnalytics, make(map[string]any))) } func (c *AWSClient) KinesisAnalyticsV2Conn(ctx context.Context) *kinesisanalyticsv2_sdkv1.KinesisAnalyticsV2 { - return errs.Must(conn[*kinesisanalyticsv2_sdkv1.KinesisAnalyticsV2](ctx, c, names.KinesisAnalyticsV2)) + return errs.Must(conn[*kinesisanalyticsv2_sdkv1.KinesisAnalyticsV2](ctx, c, names.KinesisAnalyticsV2, make(map[string]any))) } func (c *AWSClient) KinesisVideoConn(ctx context.Context) *kinesisvideo_sdkv1.KinesisVideo { - return errs.Must(conn[*kinesisvideo_sdkv1.KinesisVideo](ctx, c, names.KinesisVideo)) + return errs.Must(conn[*kinesisvideo_sdkv1.KinesisVideo](ctx, c, names.KinesisVideo, make(map[string]any))) } func (c *AWSClient) LakeFormationConn(ctx context.Context) *lakeformation_sdkv1.LakeFormation { - return errs.Must(conn[*lakeformation_sdkv1.LakeFormation](ctx, c, names.LakeFormation)) + return errs.Must(conn[*lakeformation_sdkv1.LakeFormation](ctx, c, names.LakeFormation, make(map[string]any))) } func (c *AWSClient) LambdaConn(ctx context.Context) *lambda_sdkv1.Lambda { - return errs.Must(conn[*lambda_sdkv1.Lambda](ctx, c, names.Lambda)) + return errs.Must(conn[*lambda_sdkv1.Lambda](ctx, c, names.Lambda, make(map[string]any))) } func (c *AWSClient) LambdaClient(ctx context.Context) *lambda_sdkv2.Client { @@ -757,7 +757,7 @@ func (c *AWSClient) LambdaClient(ctx context.Context) *lambda_sdkv2.Client { } func (c *AWSClient) LexModelsConn(ctx context.Context) *lexmodelbuildingservice_sdkv1.LexModelBuildingService { - return errs.Must(conn[*lexmodelbuildingservice_sdkv1.LexModelBuildingService](ctx, c, names.LexModels)) + return errs.Must(conn[*lexmodelbuildingservice_sdkv1.LexModelBuildingService](ctx, c, names.LexModels, make(map[string]any))) } func (c *AWSClient) LexV2ModelsClient(ctx context.Context) *lexmodelsv2_sdkv2.Client { @@ -765,7 +765,7 @@ func (c *AWSClient) LexV2ModelsClient(ctx context.Context) *lexmodelsv2_sdkv2.Cl 
} func (c *AWSClient) LicenseManagerConn(ctx context.Context) *licensemanager_sdkv1.LicenseManager { - return errs.Must(conn[*licensemanager_sdkv1.LicenseManager](ctx, c, names.LicenseManager)) + return errs.Must(conn[*licensemanager_sdkv1.LicenseManager](ctx, c, names.LicenseManager, make(map[string]any))) } func (c *AWSClient) LightsailClient(ctx context.Context) *lightsail_sdkv2.Client { @@ -773,7 +773,7 @@ func (c *AWSClient) LightsailClient(ctx context.Context) *lightsail_sdkv2.Client } func (c *AWSClient) LocationConn(ctx context.Context) *locationservice_sdkv1.LocationService { - return errs.Must(conn[*locationservice_sdkv1.LocationService](ctx, c, names.Location)) + return errs.Must(conn[*locationservice_sdkv1.LocationService](ctx, c, names.Location, make(map[string]any))) } func (c *AWSClient) LogsClient(ctx context.Context) *cloudwatchlogs_sdkv2.Client { @@ -785,15 +785,15 @@ func (c *AWSClient) LookoutMetricsClient(ctx context.Context) *lookoutmetrics_sd } func (c *AWSClient) MQConn(ctx context.Context) *mq_sdkv1.MQ { - return errs.Must(conn[*mq_sdkv1.MQ](ctx, c, names.MQ)) + return errs.Must(conn[*mq_sdkv1.MQ](ctx, c, names.MQ, make(map[string]any))) } func (c *AWSClient) MWAAConn(ctx context.Context) *mwaa_sdkv1.MWAA { - return errs.Must(conn[*mwaa_sdkv1.MWAA](ctx, c, names.MWAA)) + return errs.Must(conn[*mwaa_sdkv1.MWAA](ctx, c, names.MWAA, make(map[string]any))) } func (c *AWSClient) Macie2Conn(ctx context.Context) *macie2_sdkv1.Macie2 { - return errs.Must(conn[*macie2_sdkv1.Macie2](ctx, c, names.Macie2)) + return errs.Must(conn[*macie2_sdkv1.Macie2](ctx, c, names.Macie2, make(map[string]any))) } func (c *AWSClient) MediaConnectClient(ctx context.Context) *mediaconnect_sdkv2.Client { @@ -801,7 +801,7 @@ func (c *AWSClient) MediaConnectClient(ctx context.Context) *mediaconnect_sdkv2. 
} func (c *AWSClient) MediaConvertConn(ctx context.Context) *mediaconvert_sdkv1.MediaConvert { - return errs.Must(conn[*mediaconvert_sdkv1.MediaConvert](ctx, c, names.MediaConvert)) + return errs.Must(conn[*mediaconvert_sdkv1.MediaConvert](ctx, c, names.MediaConvert, make(map[string]any))) } func (c *AWSClient) MediaLiveClient(ctx context.Context) *medialive_sdkv2.Client { @@ -817,23 +817,23 @@ func (c *AWSClient) MediaPackageV2Client(ctx context.Context) *mediapackagev2_sd } func (c *AWSClient) MediaStoreConn(ctx context.Context) *mediastore_sdkv1.MediaStore { - return errs.Must(conn[*mediastore_sdkv1.MediaStore](ctx, c, names.MediaStore)) + return errs.Must(conn[*mediastore_sdkv1.MediaStore](ctx, c, names.MediaStore, make(map[string]any))) } func (c *AWSClient) MemoryDBConn(ctx context.Context) *memorydb_sdkv1.MemoryDB { - return errs.Must(conn[*memorydb_sdkv1.MemoryDB](ctx, c, names.MemoryDB)) + return errs.Must(conn[*memorydb_sdkv1.MemoryDB](ctx, c, names.MemoryDB, make(map[string]any))) } func (c *AWSClient) NeptuneConn(ctx context.Context) *neptune_sdkv1.Neptune { - return errs.Must(conn[*neptune_sdkv1.Neptune](ctx, c, names.Neptune)) + return errs.Must(conn[*neptune_sdkv1.Neptune](ctx, c, names.Neptune, make(map[string]any))) } func (c *AWSClient) NetworkFirewallConn(ctx context.Context) *networkfirewall_sdkv1.NetworkFirewall { - return errs.Must(conn[*networkfirewall_sdkv1.NetworkFirewall](ctx, c, names.NetworkFirewall)) + return errs.Must(conn[*networkfirewall_sdkv1.NetworkFirewall](ctx, c, names.NetworkFirewall, make(map[string]any))) } func (c *AWSClient) NetworkManagerConn(ctx context.Context) *networkmanager_sdkv1.NetworkManager { - return errs.Must(conn[*networkmanager_sdkv1.NetworkManager](ctx, c, names.NetworkManager)) + return errs.Must(conn[*networkmanager_sdkv1.NetworkManager](ctx, c, names.NetworkManager, make(map[string]any))) } func (c *AWSClient) ObservabilityAccessManagerClient(ctx context.Context) *oam_sdkv2.Client { @@ -841,7 +841,7 @@ 
func (c *AWSClient) ObservabilityAccessManagerClient(ctx context.Context) *oam_s } func (c *AWSClient) OpenSearchConn(ctx context.Context) *opensearchservice_sdkv1.OpenSearchService { - return errs.Must(conn[*opensearchservice_sdkv1.OpenSearchService](ctx, c, names.OpenSearch)) + return errs.Must(conn[*opensearchservice_sdkv1.OpenSearchService](ctx, c, names.OpenSearch, make(map[string]any))) } func (c *AWSClient) OpenSearchIngestionClient(ctx context.Context) *osis_sdkv2.Client { @@ -853,19 +853,19 @@ func (c *AWSClient) OpenSearchServerlessClient(ctx context.Context) *opensearchs } func (c *AWSClient) OpsWorksConn(ctx context.Context) *opsworks_sdkv1.OpsWorks { - return errs.Must(conn[*opsworks_sdkv1.OpsWorks](ctx, c, names.OpsWorks)) + return errs.Must(conn[*opsworks_sdkv1.OpsWorks](ctx, c, names.OpsWorks, make(map[string]any))) } func (c *AWSClient) OrganizationsConn(ctx context.Context) *organizations_sdkv1.Organizations { - return errs.Must(conn[*organizations_sdkv1.Organizations](ctx, c, names.Organizations)) + return errs.Must(conn[*organizations_sdkv1.Organizations](ctx, c, names.Organizations, make(map[string]any))) } func (c *AWSClient) OutpostsConn(ctx context.Context) *outposts_sdkv1.Outposts { - return errs.Must(conn[*outposts_sdkv1.Outposts](ctx, c, names.Outposts)) + return errs.Must(conn[*outposts_sdkv1.Outposts](ctx, c, names.Outposts, make(map[string]any))) } func (c *AWSClient) PinpointConn(ctx context.Context) *pinpoint_sdkv1.Pinpoint { - return errs.Must(conn[*pinpoint_sdkv1.Pinpoint](ctx, c, names.Pinpoint)) + return errs.Must(conn[*pinpoint_sdkv1.Pinpoint](ctx, c, names.Pinpoint, make(map[string]any))) } func (c *AWSClient) PipesClient(ctx context.Context) *pipes_sdkv2.Client { @@ -885,11 +885,11 @@ func (c *AWSClient) QLDBClient(ctx context.Context) *qldb_sdkv2.Client { } func (c *AWSClient) QuickSightConn(ctx context.Context) *quicksight_sdkv1.QuickSight { - return errs.Must(conn[*quicksight_sdkv1.QuickSight](ctx, c, names.QuickSight)) + 
return errs.Must(conn[*quicksight_sdkv1.QuickSight](ctx, c, names.QuickSight, make(map[string]any))) } func (c *AWSClient) RAMConn(ctx context.Context) *ram_sdkv1.RAM { - return errs.Must(conn[*ram_sdkv1.RAM](ctx, c, names.RAM)) + return errs.Must(conn[*ram_sdkv1.RAM](ctx, c, names.RAM, make(map[string]any))) } func (c *AWSClient) RBinClient(ctx context.Context) *rbin_sdkv2.Client { @@ -897,7 +897,7 @@ func (c *AWSClient) RBinClient(ctx context.Context) *rbin_sdkv2.Client { } func (c *AWSClient) RDSConn(ctx context.Context) *rds_sdkv1.RDS { - return errs.Must(conn[*rds_sdkv1.RDS](ctx, c, names.RDS)) + return errs.Must(conn[*rds_sdkv1.RDS](ctx, c, names.RDS, make(map[string]any))) } func (c *AWSClient) RDSClient(ctx context.Context) *rds_sdkv2.Client { @@ -905,11 +905,11 @@ func (c *AWSClient) RDSClient(ctx context.Context) *rds_sdkv2.Client { } func (c *AWSClient) RUMConn(ctx context.Context) *cloudwatchrum_sdkv1.CloudWatchRUM { - return errs.Must(conn[*cloudwatchrum_sdkv1.CloudWatchRUM](ctx, c, names.RUM)) + return errs.Must(conn[*cloudwatchrum_sdkv1.CloudWatchRUM](ctx, c, names.RUM, make(map[string]any))) } func (c *AWSClient) RedshiftConn(ctx context.Context) *redshift_sdkv1.Redshift { - return errs.Must(conn[*redshift_sdkv1.Redshift](ctx, c, names.Redshift)) + return errs.Must(conn[*redshift_sdkv1.Redshift](ctx, c, names.Redshift, make(map[string]any))) } func (c *AWSClient) RedshiftDataClient(ctx context.Context) *redshiftdata_sdkv2.Client { @@ -917,7 +917,7 @@ func (c *AWSClient) RedshiftDataClient(ctx context.Context) *redshiftdata_sdkv2. 
} func (c *AWSClient) RedshiftServerlessConn(ctx context.Context) *redshiftserverless_sdkv1.RedshiftServerless { - return errs.Must(conn[*redshiftserverless_sdkv1.RedshiftServerless](ctx, c, names.RedshiftServerless)) + return errs.Must(conn[*redshiftserverless_sdkv1.RedshiftServerless](ctx, c, names.RedshiftServerless, make(map[string]any))) } func (c *AWSClient) ResourceExplorer2Client(ctx context.Context) *resourceexplorer2_sdkv2.Client { @@ -937,7 +937,7 @@ func (c *AWSClient) RolesAnywhereClient(ctx context.Context) *rolesanywhere_sdkv } func (c *AWSClient) Route53Conn(ctx context.Context) *route53_sdkv1.Route53 { - return errs.Must(conn[*route53_sdkv1.Route53](ctx, c, names.Route53)) + return errs.Must(conn[*route53_sdkv1.Route53](ctx, c, names.Route53, make(map[string]any))) } func (c *AWSClient) Route53DomainsClient(ctx context.Context) *route53domains_sdkv2.Client { @@ -945,19 +945,19 @@ func (c *AWSClient) Route53DomainsClient(ctx context.Context) *route53domains_sd } func (c *AWSClient) Route53RecoveryControlConfigConn(ctx context.Context) *route53recoverycontrolconfig_sdkv1.Route53RecoveryControlConfig { - return errs.Must(conn[*route53recoverycontrolconfig_sdkv1.Route53RecoveryControlConfig](ctx, c, names.Route53RecoveryControlConfig)) + return errs.Must(conn[*route53recoverycontrolconfig_sdkv1.Route53RecoveryControlConfig](ctx, c, names.Route53RecoveryControlConfig, make(map[string]any))) } func (c *AWSClient) Route53RecoveryReadinessConn(ctx context.Context) *route53recoveryreadiness_sdkv1.Route53RecoveryReadiness { - return errs.Must(conn[*route53recoveryreadiness_sdkv1.Route53RecoveryReadiness](ctx, c, names.Route53RecoveryReadiness)) + return errs.Must(conn[*route53recoveryreadiness_sdkv1.Route53RecoveryReadiness](ctx, c, names.Route53RecoveryReadiness, make(map[string]any))) } func (c *AWSClient) Route53ResolverConn(ctx context.Context) *route53resolver_sdkv1.Route53Resolver { - return errs.Must(conn[*route53resolver_sdkv1.Route53Resolver](ctx, 
c, names.Route53Resolver)) + return errs.Must(conn[*route53resolver_sdkv1.Route53Resolver](ctx, c, names.Route53Resolver, make(map[string]any))) } func (c *AWSClient) S3Conn(ctx context.Context) *s3_sdkv1.S3 { - return errs.Must(conn[*s3_sdkv1.S3](ctx, c, names.S3)) + return errs.Must(conn[*s3_sdkv1.S3](ctx, c, names.S3, make(map[string]any))) } func (c *AWSClient) S3Client(ctx context.Context) *s3_sdkv2.Client { @@ -969,11 +969,11 @@ func (c *AWSClient) S3ControlClient(ctx context.Context) *s3control_sdkv2.Client } func (c *AWSClient) S3OutpostsConn(ctx context.Context) *s3outposts_sdkv1.S3Outposts { - return errs.Must(conn[*s3outposts_sdkv1.S3Outposts](ctx, c, names.S3Outposts)) + return errs.Must(conn[*s3outposts_sdkv1.S3Outposts](ctx, c, names.S3Outposts, make(map[string]any))) } func (c *AWSClient) SESConn(ctx context.Context) *ses_sdkv1.SES { - return errs.Must(conn[*ses_sdkv1.SES](ctx, c, names.SES)) + return errs.Must(conn[*ses_sdkv1.SES](ctx, c, names.SES, make(map[string]any))) } func (c *AWSClient) SESV2Client(ctx context.Context) *sesv2_sdkv2.Client { @@ -981,7 +981,7 @@ func (c *AWSClient) SESV2Client(ctx context.Context) *sesv2_sdkv2.Client { } func (c *AWSClient) SFNConn(ctx context.Context) *sfn_sdkv1.SFN { - return errs.Must(conn[*sfn_sdkv1.SFN](ctx, c, names.SFN)) + return errs.Must(conn[*sfn_sdkv1.SFN](ctx, c, names.SFN, make(map[string]any))) } func (c *AWSClient) SNSClient(ctx context.Context) *sns_sdkv2.Client { @@ -993,7 +993,7 @@ func (c *AWSClient) SQSClient(ctx context.Context) *sqs_sdkv2.Client { } func (c *AWSClient) SSMConn(ctx context.Context) *ssm_sdkv1.SSM { - return errs.Must(conn[*ssm_sdkv1.SSM](ctx, c, names.SSM)) + return errs.Must(conn[*ssm_sdkv1.SSM](ctx, c, names.SSM, make(map[string]any))) } func (c *AWSClient) SSMClient(ctx context.Context) *ssm_sdkv2.Client { @@ -1013,7 +1013,7 @@ func (c *AWSClient) SSOAdminClient(ctx context.Context) *ssoadmin_sdkv2.Client { } func (c *AWSClient) STSConn(ctx context.Context) 
*sts_sdkv1.STS { - return errs.Must(conn[*sts_sdkv1.STS](ctx, c, names.STS)) + return errs.Must(conn[*sts_sdkv1.STS](ctx, c, names.STS, make(map[string]any))) } func (c *AWSClient) STSClient(ctx context.Context) *sts_sdkv2.Client { @@ -1025,7 +1025,7 @@ func (c *AWSClient) SWFClient(ctx context.Context) *swf_sdkv2.Client { } func (c *AWSClient) SageMakerConn(ctx context.Context) *sagemaker_sdkv1.SageMaker { - return errs.Must(conn[*sagemaker_sdkv1.SageMaker](ctx, c, names.SageMaker)) + return errs.Must(conn[*sagemaker_sdkv1.SageMaker](ctx, c, names.SageMaker, make(map[string]any))) } func (c *AWSClient) SchedulerClient(ctx context.Context) *scheduler_sdkv2.Client { @@ -1033,11 +1033,11 @@ func (c *AWSClient) SchedulerClient(ctx context.Context) *scheduler_sdkv2.Client } func (c *AWSClient) SchemasConn(ctx context.Context) *schemas_sdkv1.Schemas { - return errs.Must(conn[*schemas_sdkv1.Schemas](ctx, c, names.Schemas)) + return errs.Must(conn[*schemas_sdkv1.Schemas](ctx, c, names.Schemas, make(map[string]any))) } func (c *AWSClient) SecretsManagerConn(ctx context.Context) *secretsmanager_sdkv1.SecretsManager { - return errs.Must(conn[*secretsmanager_sdkv1.SecretsManager](ctx, c, names.SecretsManager)) + return errs.Must(conn[*secretsmanager_sdkv1.SecretsManager](ctx, c, names.SecretsManager, make(map[string]any))) } func (c *AWSClient) SecurityHubClient(ctx context.Context) *securityhub_sdkv2.Client { @@ -1049,15 +1049,15 @@ func (c *AWSClient) SecurityLakeClient(ctx context.Context) *securitylake_sdkv2. 
} func (c *AWSClient) ServerlessRepoConn(ctx context.Context) *serverlessapplicationrepository_sdkv1.ServerlessApplicationRepository { - return errs.Must(conn[*serverlessapplicationrepository_sdkv1.ServerlessApplicationRepository](ctx, c, names.ServerlessRepo)) + return errs.Must(conn[*serverlessapplicationrepository_sdkv1.ServerlessApplicationRepository](ctx, c, names.ServerlessRepo, make(map[string]any))) } func (c *AWSClient) ServiceCatalogConn(ctx context.Context) *servicecatalog_sdkv1.ServiceCatalog { - return errs.Must(conn[*servicecatalog_sdkv1.ServiceCatalog](ctx, c, names.ServiceCatalog)) + return errs.Must(conn[*servicecatalog_sdkv1.ServiceCatalog](ctx, c, names.ServiceCatalog, make(map[string]any))) } func (c *AWSClient) ServiceDiscoveryConn(ctx context.Context) *servicediscovery_sdkv1.ServiceDiscovery { - return errs.Must(conn[*servicediscovery_sdkv1.ServiceDiscovery](ctx, c, names.ServiceDiscovery)) + return errs.Must(conn[*servicediscovery_sdkv1.ServiceDiscovery](ctx, c, names.ServiceDiscovery, make(map[string]any))) } func (c *AWSClient) ServiceQuotasClient(ctx context.Context) *servicequotas_sdkv2.Client { @@ -1065,7 +1065,7 @@ func (c *AWSClient) ServiceQuotasClient(ctx context.Context) *servicequotas_sdkv } func (c *AWSClient) ShieldConn(ctx context.Context) *shield_sdkv1.Shield { - return errs.Must(conn[*shield_sdkv1.Shield](ctx, c, names.Shield)) + return errs.Must(conn[*shield_sdkv1.Shield](ctx, c, names.Shield, make(map[string]any))) } func (c *AWSClient) SignerClient(ctx context.Context) *signer_sdkv2.Client { @@ -1073,15 +1073,15 @@ func (c *AWSClient) SignerClient(ctx context.Context) *signer_sdkv2.Client { } func (c *AWSClient) SimpleDBConn(ctx context.Context) *simpledb_sdkv1.SimpleDB { - return errs.Must(conn[*simpledb_sdkv1.SimpleDB](ctx, c, names.SimpleDB)) + return errs.Must(conn[*simpledb_sdkv1.SimpleDB](ctx, c, names.SimpleDB, make(map[string]any))) } func (c *AWSClient) StorageGatewayConn(ctx context.Context) 
*storagegateway_sdkv1.StorageGateway { - return errs.Must(conn[*storagegateway_sdkv1.StorageGateway](ctx, c, names.StorageGateway)) + return errs.Must(conn[*storagegateway_sdkv1.StorageGateway](ctx, c, names.StorageGateway, make(map[string]any))) } func (c *AWSClient) SyntheticsConn(ctx context.Context) *synthetics_sdkv1.Synthetics { - return errs.Must(conn[*synthetics_sdkv1.Synthetics](ctx, c, names.Synthetics)) + return errs.Must(conn[*synthetics_sdkv1.Synthetics](ctx, c, names.Synthetics, make(map[string]any))) } func (c *AWSClient) TimestreamWriteClient(ctx context.Context) *timestreamwrite_sdkv2.Client { @@ -1093,7 +1093,7 @@ func (c *AWSClient) TranscribeClient(ctx context.Context) *transcribe_sdkv2.Clie } func (c *AWSClient) TransferConn(ctx context.Context) *transfer_sdkv1.Transfer { - return errs.Must(conn[*transfer_sdkv1.Transfer](ctx, c, names.Transfer)) + return errs.Must(conn[*transfer_sdkv1.Transfer](ctx, c, names.Transfer, make(map[string]any))) } func (c *AWSClient) VPCLatticeClient(ctx context.Context) *vpclattice_sdkv2.Client { @@ -1105,19 +1105,19 @@ func (c *AWSClient) VerifiedPermissionsClient(ctx context.Context) *verifiedperm } func (c *AWSClient) WAFConn(ctx context.Context) *waf_sdkv1.WAF { - return errs.Must(conn[*waf_sdkv1.WAF](ctx, c, names.WAF)) + return errs.Must(conn[*waf_sdkv1.WAF](ctx, c, names.WAF, make(map[string]any))) } func (c *AWSClient) WAFRegionalConn(ctx context.Context) *wafregional_sdkv1.WAFRegional { - return errs.Must(conn[*wafregional_sdkv1.WAFRegional](ctx, c, names.WAFRegional)) + return errs.Must(conn[*wafregional_sdkv1.WAFRegional](ctx, c, names.WAFRegional, make(map[string]any))) } func (c *AWSClient) WAFV2Conn(ctx context.Context) *wafv2_sdkv1.WAFV2 { - return errs.Must(conn[*wafv2_sdkv1.WAFV2](ctx, c, names.WAFV2)) + return errs.Must(conn[*wafv2_sdkv1.WAFV2](ctx, c, names.WAFV2, make(map[string]any))) } func (c *AWSClient) WorkLinkConn(ctx context.Context) *worklink_sdkv1.WorkLink { - return 
errs.Must(conn[*worklink_sdkv1.WorkLink](ctx, c, names.WorkLink)) + return errs.Must(conn[*worklink_sdkv1.WorkLink](ctx, c, names.WorkLink, make(map[string]any))) } func (c *AWSClient) WorkSpacesClient(ctx context.Context) *workspaces_sdkv2.Client { diff --git a/internal/generate/awsclient/file.tmpl b/internal/generate/awsclient/file.tmpl index b93c0f591b5..ec27a41f538 100644 --- a/internal/generate/awsclient/file.tmpl +++ b/internal/generate/awsclient/file.tmpl @@ -19,7 +19,7 @@ import ( {{ range .Services }} {{if eq .SDKVersion "1" "1,2" }} func (c *AWSClient) {{ .ProviderNameUpper }}Conn(ctx context.Context) *{{ .GoV1Package }}_sdkv1.{{ .GoV1ClientTypeName }} { - return errs.Must(conn[*{{ .GoV1Package }}_sdkv1.{{ .GoV1ClientTypeName }}](ctx, c, names.{{ .ProviderNameUpper }})) + return errs.Must(conn[*{{ .GoV1Package }}_sdkv1.{{ .GoV1ClientTypeName }}](ctx, c, names.{{ .ProviderNameUpper }}, make(map[string]any))) } {{- end }} From 2f627d57a283ea8954c53b34166eec72d6198769 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Dec 2023 14:32:28 -0500 Subject: [PATCH 084/438] Add 'extra' to AWS SDK for Go v2 API client factory. --- internal/conns/awsclient.go | 2 +- internal/conns/awsclient_gen.go | 178 +++++++++++++------------- internal/generate/awsclient/file.tmpl | 2 +- 3 files changed, 91 insertions(+), 91 deletions(-) diff --git a/internal/conns/awsclient.go b/internal/conns/awsclient.go index 885cd6e5c74..8170349f767 100644 --- a/internal/conns/awsclient.go +++ b/internal/conns/awsclient.go @@ -236,7 +236,7 @@ func conn[T any](ctx context.Context, c *AWSClient, servicePackageName string, e } // client returns the AWS SDK for Go v2 API client for the specified service. 
-func client[T any](ctx context.Context, c *AWSClient, servicePackageName string) (T, error) { +func client[T any](ctx context.Context, c *AWSClient, servicePackageName string, extra map[string]any) (T, error) { c.lock.Lock() defer c.lock.Unlock() diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index 89963e5e535..265415a822b 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -233,7 +233,7 @@ import ( ) func (c *AWSClient) ACMClient(ctx context.Context) *acm_sdkv2.Client { - return errs.Must(client[*acm_sdkv2.Client](ctx, c, names.ACM)) + return errs.Must(client[*acm_sdkv2.Client](ctx, c, names.ACM, make(map[string]any))) } func (c *AWSClient) ACMPCAConn(ctx context.Context) *acmpca_sdkv1.ACMPCA { @@ -253,11 +253,11 @@ func (c *AWSClient) APIGatewayV2Conn(ctx context.Context) *apigatewayv2_sdkv1.Ap } func (c *AWSClient) AccessAnalyzerClient(ctx context.Context) *accessanalyzer_sdkv2.Client { - return errs.Must(client[*accessanalyzer_sdkv2.Client](ctx, c, names.AccessAnalyzer)) + return errs.Must(client[*accessanalyzer_sdkv2.Client](ctx, c, names.AccessAnalyzer, make(map[string]any))) } func (c *AWSClient) AccountClient(ctx context.Context) *account_sdkv2.Client { - return errs.Must(client[*account_sdkv2.Client](ctx, c, names.Account)) + return errs.Must(client[*account_sdkv2.Client](ctx, c, names.Account, make(map[string]any))) } func (c *AWSClient) AmplifyConn(ctx context.Context) *amplify_sdkv1.Amplify { @@ -273,15 +273,15 @@ func (c *AWSClient) AppConfigConn(ctx context.Context) *appconfig_sdkv1.AppConfi } func (c *AWSClient) AppConfigClient(ctx context.Context) *appconfig_sdkv2.Client { - return errs.Must(client[*appconfig_sdkv2.Client](ctx, c, names.AppConfig)) + return errs.Must(client[*appconfig_sdkv2.Client](ctx, c, names.AppConfig, make(map[string]any))) } func (c *AWSClient) AppFabricClient(ctx context.Context) *appfabric_sdkv2.Client { - return errs.Must(client[*appfabric_sdkv2.Client](ctx, 
c, names.AppFabric)) + return errs.Must(client[*appfabric_sdkv2.Client](ctx, c, names.AppFabric, make(map[string]any))) } func (c *AWSClient) AppFlowClient(ctx context.Context) *appflow_sdkv2.Client { - return errs.Must(client[*appflow_sdkv2.Client](ctx, c, names.AppFlow)) + return errs.Must(client[*appflow_sdkv2.Client](ctx, c, names.AppFlow, make(map[string]any))) } func (c *AWSClient) AppIntegrationsConn(ctx context.Context) *appintegrationsservice_sdkv1.AppIntegrationsService { @@ -293,7 +293,7 @@ func (c *AWSClient) AppMeshConn(ctx context.Context) *appmesh_sdkv1.AppMesh { } func (c *AWSClient) AppRunnerClient(ctx context.Context) *apprunner_sdkv2.Client { - return errs.Must(client[*apprunner_sdkv2.Client](ctx, c, names.AppRunner)) + return errs.Must(client[*apprunner_sdkv2.Client](ctx, c, names.AppRunner, make(map[string]any))) } func (c *AWSClient) AppStreamConn(ctx context.Context) *appstream_sdkv1.AppStream { @@ -309,11 +309,11 @@ func (c *AWSClient) ApplicationInsightsConn(ctx context.Context) *applicationins } func (c *AWSClient) AthenaClient(ctx context.Context) *athena_sdkv2.Client { - return errs.Must(client[*athena_sdkv2.Client](ctx, c, names.Athena)) + return errs.Must(client[*athena_sdkv2.Client](ctx, c, names.Athena, make(map[string]any))) } func (c *AWSClient) AuditManagerClient(ctx context.Context) *auditmanager_sdkv2.Client { - return errs.Must(client[*auditmanager_sdkv2.Client](ctx, c, names.AuditManager)) + return errs.Must(client[*auditmanager_sdkv2.Client](ctx, c, names.AuditManager, make(map[string]any))) } func (c *AWSClient) AutoScalingConn(ctx context.Context) *autoscaling_sdkv1.AutoScaling { @@ -333,7 +333,7 @@ func (c *AWSClient) BatchConn(ctx context.Context) *batch_sdkv1.Batch { } func (c *AWSClient) BedrockClient(ctx context.Context) *bedrock_sdkv2.Client { - return errs.Must(client[*bedrock_sdkv2.Client](ctx, c, names.Bedrock)) + return errs.Must(client[*bedrock_sdkv2.Client](ctx, c, names.Bedrock, make(map[string]any))) } func (c 
*AWSClient) BudgetsConn(ctx context.Context) *budgets_sdkv1.Budgets { @@ -353,15 +353,15 @@ func (c *AWSClient) ChimeConn(ctx context.Context) *chime_sdkv1.Chime { } func (c *AWSClient) ChimeSDKMediaPipelinesClient(ctx context.Context) *chimesdkmediapipelines_sdkv2.Client { - return errs.Must(client[*chimesdkmediapipelines_sdkv2.Client](ctx, c, names.ChimeSDKMediaPipelines)) + return errs.Must(client[*chimesdkmediapipelines_sdkv2.Client](ctx, c, names.ChimeSDKMediaPipelines, make(map[string]any))) } func (c *AWSClient) ChimeSDKVoiceClient(ctx context.Context) *chimesdkvoice_sdkv2.Client { - return errs.Must(client[*chimesdkvoice_sdkv2.Client](ctx, c, names.ChimeSDKVoice)) + return errs.Must(client[*chimesdkvoice_sdkv2.Client](ctx, c, names.ChimeSDKVoice, make(map[string]any))) } func (c *AWSClient) CleanRoomsClient(ctx context.Context) *cleanrooms_sdkv2.Client { - return errs.Must(client[*cleanrooms_sdkv2.Client](ctx, c, names.CleanRooms)) + return errs.Must(client[*cleanrooms_sdkv2.Client](ctx, c, names.CleanRooms, make(map[string]any))) } func (c *AWSClient) Cloud9Conn(ctx context.Context) *cloud9_sdkv1.Cloud9 { @@ -369,7 +369,7 @@ func (c *AWSClient) Cloud9Conn(ctx context.Context) *cloud9_sdkv1.Cloud9 { } func (c *AWSClient) CloudControlClient(ctx context.Context) *cloudcontrol_sdkv2.Client { - return errs.Must(client[*cloudcontrol_sdkv2.Client](ctx, c, names.CloudControl)) + return errs.Must(client[*cloudcontrol_sdkv2.Client](ctx, c, names.CloudControl, make(map[string]any))) } func (c *AWSClient) CloudFormationConn(ctx context.Context) *cloudformation_sdkv1.CloudFormation { @@ -405,7 +405,7 @@ func (c *AWSClient) CodeBuildConn(ctx context.Context) *codebuild_sdkv1.CodeBuil } func (c *AWSClient) CodeCatalystClient(ctx context.Context) *codecatalyst_sdkv2.Client { - return errs.Must(client[*codecatalyst_sdkv2.Client](ctx, c, names.CodeCatalyst)) + return errs.Must(client[*codecatalyst_sdkv2.Client](ctx, c, names.CodeCatalyst, make(map[string]any))) } func (c 
*AWSClient) CodeCommitConn(ctx context.Context) *codecommit_sdkv1.CodeCommit { @@ -413,7 +413,7 @@ func (c *AWSClient) CodeCommitConn(ctx context.Context) *codecommit_sdkv1.CodeCo } func (c *AWSClient) CodeGuruProfilerClient(ctx context.Context) *codeguruprofiler_sdkv2.Client { - return errs.Must(client[*codeguruprofiler_sdkv2.Client](ctx, c, names.CodeGuruProfiler)) + return errs.Must(client[*codeguruprofiler_sdkv2.Client](ctx, c, names.CodeGuruProfiler, make(map[string]any))) } func (c *AWSClient) CodeGuruReviewerConn(ctx context.Context) *codegurureviewer_sdkv1.CodeGuruReviewer { @@ -425,11 +425,11 @@ func (c *AWSClient) CodePipelineConn(ctx context.Context) *codepipeline_sdkv1.Co } func (c *AWSClient) CodeStarConnectionsClient(ctx context.Context) *codestarconnections_sdkv2.Client { - return errs.Must(client[*codestarconnections_sdkv2.Client](ctx, c, names.CodeStarConnections)) + return errs.Must(client[*codestarconnections_sdkv2.Client](ctx, c, names.CodeStarConnections, make(map[string]any))) } func (c *AWSClient) CodeStarNotificationsClient(ctx context.Context) *codestarnotifications_sdkv2.Client { - return errs.Must(client[*codestarnotifications_sdkv2.Client](ctx, c, names.CodeStarNotifications)) + return errs.Must(client[*codestarnotifications_sdkv2.Client](ctx, c, names.CodeStarNotifications, make(map[string]any))) } func (c *AWSClient) CognitoIDPConn(ctx context.Context) *cognitoidentityprovider_sdkv1.CognitoIdentityProvider { @@ -441,11 +441,11 @@ func (c *AWSClient) CognitoIdentityConn(ctx context.Context) *cognitoidentity_sd } func (c *AWSClient) ComprehendClient(ctx context.Context) *comprehend_sdkv2.Client { - return errs.Must(client[*comprehend_sdkv2.Client](ctx, c, names.Comprehend)) + return errs.Must(client[*comprehend_sdkv2.Client](ctx, c, names.Comprehend, make(map[string]any))) } func (c *AWSClient) ComputeOptimizerClient(ctx context.Context) *computeoptimizer_sdkv2.Client { - return errs.Must(client[*computeoptimizer_sdkv2.Client](ctx, c, 
names.ComputeOptimizer)) + return errs.Must(client[*computeoptimizer_sdkv2.Client](ctx, c, names.ComputeOptimizer, make(map[string]any))) } func (c *AWSClient) ConfigServiceConn(ctx context.Context) *configservice_sdkv1.ConfigService { @@ -457,15 +457,15 @@ func (c *AWSClient) ConnectConn(ctx context.Context) *connect_sdkv1.Connect { } func (c *AWSClient) ConnectCasesClient(ctx context.Context) *connectcases_sdkv2.Client { - return errs.Must(client[*connectcases_sdkv2.Client](ctx, c, names.ConnectCases)) + return errs.Must(client[*connectcases_sdkv2.Client](ctx, c, names.ConnectCases, make(map[string]any))) } func (c *AWSClient) ControlTowerClient(ctx context.Context) *controltower_sdkv2.Client { - return errs.Must(client[*controltower_sdkv2.Client](ctx, c, names.ControlTower)) + return errs.Must(client[*controltower_sdkv2.Client](ctx, c, names.ControlTower, make(map[string]any))) } func (c *AWSClient) CustomerProfilesClient(ctx context.Context) *customerprofiles_sdkv2.Client { - return errs.Must(client[*customerprofiles_sdkv2.Client](ctx, c, names.CustomerProfiles)) + return errs.Must(client[*customerprofiles_sdkv2.Client](ctx, c, names.CustomerProfiles, make(map[string]any))) } func (c *AWSClient) DAXConn(ctx context.Context) *dax_sdkv1.DAX { @@ -485,7 +485,7 @@ func (c *AWSClient) DSConn(ctx context.Context) *directoryservice_sdkv1.Director } func (c *AWSClient) DSClient(ctx context.Context) *directoryservice_sdkv2.Client { - return errs.Must(client[*directoryservice_sdkv2.Client](ctx, c, names.DS)) + return errs.Must(client[*directoryservice_sdkv2.Client](ctx, c, names.DS, make(map[string]any))) } func (c *AWSClient) DataExchangeConn(ctx context.Context) *dataexchange_sdkv1.DataExchange { @@ -501,7 +501,7 @@ func (c *AWSClient) DataSyncConn(ctx context.Context) *datasync_sdkv1.DataSync { } func (c *AWSClient) DeployClient(ctx context.Context) *codedeploy_sdkv2.Client { - return errs.Must(client[*codedeploy_sdkv2.Client](ctx, c, names.Deploy)) + return 
errs.Must(client[*codedeploy_sdkv2.Client](ctx, c, names.Deploy, make(map[string]any))) } func (c *AWSClient) DetectiveConn(ctx context.Context) *detective_sdkv1.Detective { @@ -521,7 +521,7 @@ func (c *AWSClient) DocDBConn(ctx context.Context) *docdb_sdkv1.DocDB { } func (c *AWSClient) DocDBElasticClient(ctx context.Context) *docdbelastic_sdkv2.Client { - return errs.Must(client[*docdbelastic_sdkv2.Client](ctx, c, names.DocDBElastic)) + return errs.Must(client[*docdbelastic_sdkv2.Client](ctx, c, names.DocDBElastic, make(map[string]any))) } func (c *AWSClient) DynamoDBConn(ctx context.Context) *dynamodb_sdkv1.DynamoDB { @@ -533,7 +533,7 @@ func (c *AWSClient) EC2Conn(ctx context.Context) *ec2_sdkv1.EC2 { } func (c *AWSClient) EC2Client(ctx context.Context) *ec2_sdkv2.Client { - return errs.Must(client[*ec2_sdkv2.Client](ctx, c, names.EC2)) + return errs.Must(client[*ec2_sdkv2.Client](ctx, c, names.EC2, make(map[string]any))) } func (c *AWSClient) ECRConn(ctx context.Context) *ecr_sdkv1.ECR { @@ -541,7 +541,7 @@ func (c *AWSClient) ECRConn(ctx context.Context) *ecr_sdkv1.ECR { } func (c *AWSClient) ECRClient(ctx context.Context) *ecr_sdkv2.Client { - return errs.Must(client[*ecr_sdkv2.Client](ctx, c, names.ECR)) + return errs.Must(client[*ecr_sdkv2.Client](ctx, c, names.ECR, make(map[string]any))) } func (c *AWSClient) ECRPublicConn(ctx context.Context) *ecrpublic_sdkv1.ECRPublic { @@ -557,7 +557,7 @@ func (c *AWSClient) EFSConn(ctx context.Context) *efs_sdkv1.EFS { } func (c *AWSClient) EKSClient(ctx context.Context) *eks_sdkv2.Client { - return errs.Must(client[*eks_sdkv2.Client](ctx, c, names.EKS)) + return errs.Must(client[*eks_sdkv2.Client](ctx, c, names.EKS, make(map[string]any))) } func (c *AWSClient) ELBConn(ctx context.Context) *elb_sdkv1.ELB { @@ -573,7 +573,7 @@ func (c *AWSClient) EMRConn(ctx context.Context) *emr_sdkv1.EMR { } func (c *AWSClient) EMRClient(ctx context.Context) *emr_sdkv2.Client { - return errs.Must(client[*emr_sdkv2.Client](ctx, c, 
names.EMR)) + return errs.Must(client[*emr_sdkv2.Client](ctx, c, names.EMR, make(map[string]any))) } func (c *AWSClient) EMRContainersConn(ctx context.Context) *emrcontainers_sdkv1.EMRContainers { @@ -581,7 +581,7 @@ func (c *AWSClient) EMRContainersConn(ctx context.Context) *emrcontainers_sdkv1. } func (c *AWSClient) EMRServerlessClient(ctx context.Context) *emrserverless_sdkv2.Client { - return errs.Must(client[*emrserverless_sdkv2.Client](ctx, c, names.EMRServerless)) + return errs.Must(client[*emrserverless_sdkv2.Client](ctx, c, names.EMRServerless, make(map[string]any))) } func (c *AWSClient) ElastiCacheConn(ctx context.Context) *elasticache_sdkv1.ElastiCache { @@ -605,11 +605,11 @@ func (c *AWSClient) EventsConn(ctx context.Context) *eventbridge_sdkv1.EventBrid } func (c *AWSClient) EvidentlyClient(ctx context.Context) *evidently_sdkv2.Client { - return errs.Must(client[*evidently_sdkv2.Client](ctx, c, names.Evidently)) + return errs.Must(client[*evidently_sdkv2.Client](ctx, c, names.Evidently, make(map[string]any))) } func (c *AWSClient) FISClient(ctx context.Context) *fis_sdkv2.Client { - return errs.Must(client[*fis_sdkv2.Client](ctx, c, names.FIS)) + return errs.Must(client[*fis_sdkv2.Client](ctx, c, names.FIS, make(map[string]any))) } func (c *AWSClient) FMSConn(ctx context.Context) *fms_sdkv1.FMS { @@ -621,7 +621,7 @@ func (c *AWSClient) FSxConn(ctx context.Context) *fsx_sdkv1.FSx { } func (c *AWSClient) FinSpaceClient(ctx context.Context) *finspace_sdkv2.Client { - return errs.Must(client[*finspace_sdkv2.Client](ctx, c, names.FinSpace)) + return errs.Must(client[*finspace_sdkv2.Client](ctx, c, names.FinSpace, make(map[string]any))) } func (c *AWSClient) FirehoseConn(ctx context.Context) *firehose_sdkv1.Firehose { @@ -633,7 +633,7 @@ func (c *AWSClient) GameLiftConn(ctx context.Context) *gamelift_sdkv1.GameLift { } func (c *AWSClient) GlacierClient(ctx context.Context) *glacier_sdkv2.Client { - return errs.Must(client[*glacier_sdkv2.Client](ctx, c, 
names.Glacier)) + return errs.Must(client[*glacier_sdkv2.Client](ctx, c, names.Glacier, make(map[string]any))) } func (c *AWSClient) GlobalAcceleratorConn(ctx context.Context) *globalaccelerator_sdkv1.GlobalAccelerator { @@ -657,7 +657,7 @@ func (c *AWSClient) GuardDutyConn(ctx context.Context) *guardduty_sdkv1.GuardDut } func (c *AWSClient) HealthLakeClient(ctx context.Context) *healthlake_sdkv2.Client { - return errs.Must(client[*healthlake_sdkv2.Client](ctx, c, names.HealthLake)) + return errs.Must(client[*healthlake_sdkv2.Client](ctx, c, names.HealthLake, make(map[string]any))) } func (c *AWSClient) IAMConn(ctx context.Context) *iam_sdkv1.IAM { @@ -669,11 +669,11 @@ func (c *AWSClient) IVSConn(ctx context.Context) *ivs_sdkv1.IVS { } func (c *AWSClient) IVSChatClient(ctx context.Context) *ivschat_sdkv2.Client { - return errs.Must(client[*ivschat_sdkv2.Client](ctx, c, names.IVSChat)) + return errs.Must(client[*ivschat_sdkv2.Client](ctx, c, names.IVSChat, make(map[string]any))) } func (c *AWSClient) IdentityStoreClient(ctx context.Context) *identitystore_sdkv2.Client { - return errs.Must(client[*identitystore_sdkv2.Client](ctx, c, names.IdentityStore)) + return errs.Must(client[*identitystore_sdkv2.Client](ctx, c, names.IdentityStore, make(map[string]any))) } func (c *AWSClient) ImageBuilderConn(ctx context.Context) *imagebuilder_sdkv1.Imagebuilder { @@ -685,11 +685,11 @@ func (c *AWSClient) InspectorConn(ctx context.Context) *inspector_sdkv1.Inspecto } func (c *AWSClient) Inspector2Client(ctx context.Context) *inspector2_sdkv2.Client { - return errs.Must(client[*inspector2_sdkv2.Client](ctx, c, names.Inspector2)) + return errs.Must(client[*inspector2_sdkv2.Client](ctx, c, names.Inspector2, make(map[string]any))) } func (c *AWSClient) InternetMonitorClient(ctx context.Context) *internetmonitor_sdkv2.Client { - return errs.Must(client[*internetmonitor_sdkv2.Client](ctx, c, names.InternetMonitor)) + return errs.Must(client[*internetmonitor_sdkv2.Client](ctx, c, 
names.InternetMonitor, make(map[string]any))) } func (c *AWSClient) IoTConn(ctx context.Context) *iot_sdkv1.IoT { @@ -713,7 +713,7 @@ func (c *AWSClient) KafkaConn(ctx context.Context) *kafka_sdkv1.Kafka { } func (c *AWSClient) KafkaClient(ctx context.Context) *kafka_sdkv2.Client { - return errs.Must(client[*kafka_sdkv2.Client](ctx, c, names.Kafka)) + return errs.Must(client[*kafka_sdkv2.Client](ctx, c, names.Kafka, make(map[string]any))) } func (c *AWSClient) KafkaConnectConn(ctx context.Context) *kafkaconnect_sdkv1.KafkaConnect { @@ -721,11 +721,11 @@ func (c *AWSClient) KafkaConnectConn(ctx context.Context) *kafkaconnect_sdkv1.Ka } func (c *AWSClient) KendraClient(ctx context.Context) *kendra_sdkv2.Client { - return errs.Must(client[*kendra_sdkv2.Client](ctx, c, names.Kendra)) + return errs.Must(client[*kendra_sdkv2.Client](ctx, c, names.Kendra, make(map[string]any))) } func (c *AWSClient) KeyspacesClient(ctx context.Context) *keyspaces_sdkv2.Client { - return errs.Must(client[*keyspaces_sdkv2.Client](ctx, c, names.Keyspaces)) + return errs.Must(client[*keyspaces_sdkv2.Client](ctx, c, names.Keyspaces, make(map[string]any))) } func (c *AWSClient) KinesisConn(ctx context.Context) *kinesis_sdkv1.Kinesis { @@ -753,7 +753,7 @@ func (c *AWSClient) LambdaConn(ctx context.Context) *lambda_sdkv1.Lambda { } func (c *AWSClient) LambdaClient(ctx context.Context) *lambda_sdkv2.Client { - return errs.Must(client[*lambda_sdkv2.Client](ctx, c, names.Lambda)) + return errs.Must(client[*lambda_sdkv2.Client](ctx, c, names.Lambda, make(map[string]any))) } func (c *AWSClient) LexModelsConn(ctx context.Context) *lexmodelbuildingservice_sdkv1.LexModelBuildingService { @@ -761,7 +761,7 @@ func (c *AWSClient) LexModelsConn(ctx context.Context) *lexmodelbuildingservice_ } func (c *AWSClient) LexV2ModelsClient(ctx context.Context) *lexmodelsv2_sdkv2.Client { - return errs.Must(client[*lexmodelsv2_sdkv2.Client](ctx, c, names.LexV2Models)) + return 
errs.Must(client[*lexmodelsv2_sdkv2.Client](ctx, c, names.LexV2Models, make(map[string]any))) } func (c *AWSClient) LicenseManagerConn(ctx context.Context) *licensemanager_sdkv1.LicenseManager { @@ -769,7 +769,7 @@ func (c *AWSClient) LicenseManagerConn(ctx context.Context) *licensemanager_sdkv } func (c *AWSClient) LightsailClient(ctx context.Context) *lightsail_sdkv2.Client { - return errs.Must(client[*lightsail_sdkv2.Client](ctx, c, names.Lightsail)) + return errs.Must(client[*lightsail_sdkv2.Client](ctx, c, names.Lightsail, make(map[string]any))) } func (c *AWSClient) LocationConn(ctx context.Context) *locationservice_sdkv1.LocationService { @@ -777,11 +777,11 @@ func (c *AWSClient) LocationConn(ctx context.Context) *locationservice_sdkv1.Loc } func (c *AWSClient) LogsClient(ctx context.Context) *cloudwatchlogs_sdkv2.Client { - return errs.Must(client[*cloudwatchlogs_sdkv2.Client](ctx, c, names.Logs)) + return errs.Must(client[*cloudwatchlogs_sdkv2.Client](ctx, c, names.Logs, make(map[string]any))) } func (c *AWSClient) LookoutMetricsClient(ctx context.Context) *lookoutmetrics_sdkv2.Client { - return errs.Must(client[*lookoutmetrics_sdkv2.Client](ctx, c, names.LookoutMetrics)) + return errs.Must(client[*lookoutmetrics_sdkv2.Client](ctx, c, names.LookoutMetrics, make(map[string]any))) } func (c *AWSClient) MQConn(ctx context.Context) *mq_sdkv1.MQ { @@ -797,7 +797,7 @@ func (c *AWSClient) Macie2Conn(ctx context.Context) *macie2_sdkv1.Macie2 { } func (c *AWSClient) MediaConnectClient(ctx context.Context) *mediaconnect_sdkv2.Client { - return errs.Must(client[*mediaconnect_sdkv2.Client](ctx, c, names.MediaConnect)) + return errs.Must(client[*mediaconnect_sdkv2.Client](ctx, c, names.MediaConnect, make(map[string]any))) } func (c *AWSClient) MediaConvertConn(ctx context.Context) *mediaconvert_sdkv1.MediaConvert { @@ -805,15 +805,15 @@ func (c *AWSClient) MediaConvertConn(ctx context.Context) *mediaconvert_sdkv1.Me } func (c *AWSClient) MediaLiveClient(ctx 
context.Context) *medialive_sdkv2.Client { - return errs.Must(client[*medialive_sdkv2.Client](ctx, c, names.MediaLive)) + return errs.Must(client[*medialive_sdkv2.Client](ctx, c, names.MediaLive, make(map[string]any))) } func (c *AWSClient) MediaPackageClient(ctx context.Context) *mediapackage_sdkv2.Client { - return errs.Must(client[*mediapackage_sdkv2.Client](ctx, c, names.MediaPackage)) + return errs.Must(client[*mediapackage_sdkv2.Client](ctx, c, names.MediaPackage, make(map[string]any))) } func (c *AWSClient) MediaPackageV2Client(ctx context.Context) *mediapackagev2_sdkv2.Client { - return errs.Must(client[*mediapackagev2_sdkv2.Client](ctx, c, names.MediaPackageV2)) + return errs.Must(client[*mediapackagev2_sdkv2.Client](ctx, c, names.MediaPackageV2, make(map[string]any))) } func (c *AWSClient) MediaStoreConn(ctx context.Context) *mediastore_sdkv1.MediaStore { @@ -837,7 +837,7 @@ func (c *AWSClient) NetworkManagerConn(ctx context.Context) *networkmanager_sdkv } func (c *AWSClient) ObservabilityAccessManagerClient(ctx context.Context) *oam_sdkv2.Client { - return errs.Must(client[*oam_sdkv2.Client](ctx, c, names.ObservabilityAccessManager)) + return errs.Must(client[*oam_sdkv2.Client](ctx, c, names.ObservabilityAccessManager, make(map[string]any))) } func (c *AWSClient) OpenSearchConn(ctx context.Context) *opensearchservice_sdkv1.OpenSearchService { @@ -845,11 +845,11 @@ func (c *AWSClient) OpenSearchConn(ctx context.Context) *opensearchservice_sdkv1 } func (c *AWSClient) OpenSearchIngestionClient(ctx context.Context) *osis_sdkv2.Client { - return errs.Must(client[*osis_sdkv2.Client](ctx, c, names.OpenSearchIngestion)) + return errs.Must(client[*osis_sdkv2.Client](ctx, c, names.OpenSearchIngestion, make(map[string]any))) } func (c *AWSClient) OpenSearchServerlessClient(ctx context.Context) *opensearchserverless_sdkv2.Client { - return errs.Must(client[*opensearchserverless_sdkv2.Client](ctx, c, names.OpenSearchServerless)) + return 
errs.Must(client[*opensearchserverless_sdkv2.Client](ctx, c, names.OpenSearchServerless, make(map[string]any))) } func (c *AWSClient) OpsWorksConn(ctx context.Context) *opsworks_sdkv1.OpsWorks { @@ -869,19 +869,19 @@ func (c *AWSClient) PinpointConn(ctx context.Context) *pinpoint_sdkv1.Pinpoint { } func (c *AWSClient) PipesClient(ctx context.Context) *pipes_sdkv2.Client { - return errs.Must(client[*pipes_sdkv2.Client](ctx, c, names.Pipes)) + return errs.Must(client[*pipes_sdkv2.Client](ctx, c, names.Pipes, make(map[string]any))) } func (c *AWSClient) PollyClient(ctx context.Context) *polly_sdkv2.Client { - return errs.Must(client[*polly_sdkv2.Client](ctx, c, names.Polly)) + return errs.Must(client[*polly_sdkv2.Client](ctx, c, names.Polly, make(map[string]any))) } func (c *AWSClient) PricingClient(ctx context.Context) *pricing_sdkv2.Client { - return errs.Must(client[*pricing_sdkv2.Client](ctx, c, names.Pricing)) + return errs.Must(client[*pricing_sdkv2.Client](ctx, c, names.Pricing, make(map[string]any))) } func (c *AWSClient) QLDBClient(ctx context.Context) *qldb_sdkv2.Client { - return errs.Must(client[*qldb_sdkv2.Client](ctx, c, names.QLDB)) + return errs.Must(client[*qldb_sdkv2.Client](ctx, c, names.QLDB, make(map[string]any))) } func (c *AWSClient) QuickSightConn(ctx context.Context) *quicksight_sdkv1.QuickSight { @@ -893,7 +893,7 @@ func (c *AWSClient) RAMConn(ctx context.Context) *ram_sdkv1.RAM { } func (c *AWSClient) RBinClient(ctx context.Context) *rbin_sdkv2.Client { - return errs.Must(client[*rbin_sdkv2.Client](ctx, c, names.RBin)) + return errs.Must(client[*rbin_sdkv2.Client](ctx, c, names.RBin, make(map[string]any))) } func (c *AWSClient) RDSConn(ctx context.Context) *rds_sdkv1.RDS { @@ -901,7 +901,7 @@ func (c *AWSClient) RDSConn(ctx context.Context) *rds_sdkv1.RDS { } func (c *AWSClient) RDSClient(ctx context.Context) *rds_sdkv2.Client { - return errs.Must(client[*rds_sdkv2.Client](ctx, c, names.RDS)) + return errs.Must(client[*rds_sdkv2.Client](ctx, 
c, names.RDS, make(map[string]any))) } func (c *AWSClient) RUMConn(ctx context.Context) *cloudwatchrum_sdkv1.CloudWatchRUM { @@ -913,7 +913,7 @@ func (c *AWSClient) RedshiftConn(ctx context.Context) *redshift_sdkv1.Redshift { } func (c *AWSClient) RedshiftDataClient(ctx context.Context) *redshiftdata_sdkv2.Client { - return errs.Must(client[*redshiftdata_sdkv2.Client](ctx, c, names.RedshiftData)) + return errs.Must(client[*redshiftdata_sdkv2.Client](ctx, c, names.RedshiftData, make(map[string]any))) } func (c *AWSClient) RedshiftServerlessConn(ctx context.Context) *redshiftserverless_sdkv1.RedshiftServerless { @@ -921,19 +921,19 @@ func (c *AWSClient) RedshiftServerlessConn(ctx context.Context) *redshiftserverl } func (c *AWSClient) ResourceExplorer2Client(ctx context.Context) *resourceexplorer2_sdkv2.Client { - return errs.Must(client[*resourceexplorer2_sdkv2.Client](ctx, c, names.ResourceExplorer2)) + return errs.Must(client[*resourceexplorer2_sdkv2.Client](ctx, c, names.ResourceExplorer2, make(map[string]any))) } func (c *AWSClient) ResourceGroupsClient(ctx context.Context) *resourcegroups_sdkv2.Client { - return errs.Must(client[*resourcegroups_sdkv2.Client](ctx, c, names.ResourceGroups)) + return errs.Must(client[*resourcegroups_sdkv2.Client](ctx, c, names.ResourceGroups, make(map[string]any))) } func (c *AWSClient) ResourceGroupsTaggingAPIClient(ctx context.Context) *resourcegroupstaggingapi_sdkv2.Client { - return errs.Must(client[*resourcegroupstaggingapi_sdkv2.Client](ctx, c, names.ResourceGroupsTaggingAPI)) + return errs.Must(client[*resourcegroupstaggingapi_sdkv2.Client](ctx, c, names.ResourceGroupsTaggingAPI, make(map[string]any))) } func (c *AWSClient) RolesAnywhereClient(ctx context.Context) *rolesanywhere_sdkv2.Client { - return errs.Must(client[*rolesanywhere_sdkv2.Client](ctx, c, names.RolesAnywhere)) + return errs.Must(client[*rolesanywhere_sdkv2.Client](ctx, c, names.RolesAnywhere, make(map[string]any))) } func (c *AWSClient) Route53Conn(ctx 
context.Context) *route53_sdkv1.Route53 { @@ -941,7 +941,7 @@ func (c *AWSClient) Route53Conn(ctx context.Context) *route53_sdkv1.Route53 { } func (c *AWSClient) Route53DomainsClient(ctx context.Context) *route53domains_sdkv2.Client { - return errs.Must(client[*route53domains_sdkv2.Client](ctx, c, names.Route53Domains)) + return errs.Must(client[*route53domains_sdkv2.Client](ctx, c, names.Route53Domains, make(map[string]any))) } func (c *AWSClient) Route53RecoveryControlConfigConn(ctx context.Context) *route53recoverycontrolconfig_sdkv1.Route53RecoveryControlConfig { @@ -961,11 +961,11 @@ func (c *AWSClient) S3Conn(ctx context.Context) *s3_sdkv1.S3 { } func (c *AWSClient) S3Client(ctx context.Context) *s3_sdkv2.Client { - return errs.Must(client[*s3_sdkv2.Client](ctx, c, names.S3)) + return errs.Must(client[*s3_sdkv2.Client](ctx, c, names.S3, make(map[string]any))) } func (c *AWSClient) S3ControlClient(ctx context.Context) *s3control_sdkv2.Client { - return errs.Must(client[*s3control_sdkv2.Client](ctx, c, names.S3Control)) + return errs.Must(client[*s3control_sdkv2.Client](ctx, c, names.S3Control, make(map[string]any))) } func (c *AWSClient) S3OutpostsConn(ctx context.Context) *s3outposts_sdkv1.S3Outposts { @@ -977,7 +977,7 @@ func (c *AWSClient) SESConn(ctx context.Context) *ses_sdkv1.SES { } func (c *AWSClient) SESV2Client(ctx context.Context) *sesv2_sdkv2.Client { - return errs.Must(client[*sesv2_sdkv2.Client](ctx, c, names.SESV2)) + return errs.Must(client[*sesv2_sdkv2.Client](ctx, c, names.SESV2, make(map[string]any))) } func (c *AWSClient) SFNConn(ctx context.Context) *sfn_sdkv1.SFN { @@ -985,11 +985,11 @@ func (c *AWSClient) SFNConn(ctx context.Context) *sfn_sdkv1.SFN { } func (c *AWSClient) SNSClient(ctx context.Context) *sns_sdkv2.Client { - return errs.Must(client[*sns_sdkv2.Client](ctx, c, names.SNS)) + return errs.Must(client[*sns_sdkv2.Client](ctx, c, names.SNS, make(map[string]any))) } func (c *AWSClient) SQSClient(ctx context.Context) 
*sqs_sdkv2.Client { - return errs.Must(client[*sqs_sdkv2.Client](ctx, c, names.SQS)) + return errs.Must(client[*sqs_sdkv2.Client](ctx, c, names.SQS, make(map[string]any))) } func (c *AWSClient) SSMConn(ctx context.Context) *ssm_sdkv1.SSM { @@ -997,19 +997,19 @@ func (c *AWSClient) SSMConn(ctx context.Context) *ssm_sdkv1.SSM { } func (c *AWSClient) SSMClient(ctx context.Context) *ssm_sdkv2.Client { - return errs.Must(client[*ssm_sdkv2.Client](ctx, c, names.SSM)) + return errs.Must(client[*ssm_sdkv2.Client](ctx, c, names.SSM, make(map[string]any))) } func (c *AWSClient) SSMContactsClient(ctx context.Context) *ssmcontacts_sdkv2.Client { - return errs.Must(client[*ssmcontacts_sdkv2.Client](ctx, c, names.SSMContacts)) + return errs.Must(client[*ssmcontacts_sdkv2.Client](ctx, c, names.SSMContacts, make(map[string]any))) } func (c *AWSClient) SSMIncidentsClient(ctx context.Context) *ssmincidents_sdkv2.Client { - return errs.Must(client[*ssmincidents_sdkv2.Client](ctx, c, names.SSMIncidents)) + return errs.Must(client[*ssmincidents_sdkv2.Client](ctx, c, names.SSMIncidents, make(map[string]any))) } func (c *AWSClient) SSOAdminClient(ctx context.Context) *ssoadmin_sdkv2.Client { - return errs.Must(client[*ssoadmin_sdkv2.Client](ctx, c, names.SSOAdmin)) + return errs.Must(client[*ssoadmin_sdkv2.Client](ctx, c, names.SSOAdmin, make(map[string]any))) } func (c *AWSClient) STSConn(ctx context.Context) *sts_sdkv1.STS { @@ -1017,11 +1017,11 @@ func (c *AWSClient) STSConn(ctx context.Context) *sts_sdkv1.STS { } func (c *AWSClient) STSClient(ctx context.Context) *sts_sdkv2.Client { - return errs.Must(client[*sts_sdkv2.Client](ctx, c, names.STS)) + return errs.Must(client[*sts_sdkv2.Client](ctx, c, names.STS, make(map[string]any))) } func (c *AWSClient) SWFClient(ctx context.Context) *swf_sdkv2.Client { - return errs.Must(client[*swf_sdkv2.Client](ctx, c, names.SWF)) + return errs.Must(client[*swf_sdkv2.Client](ctx, c, names.SWF, make(map[string]any))) } func (c *AWSClient) 
SageMakerConn(ctx context.Context) *sagemaker_sdkv1.SageMaker { @@ -1029,7 +1029,7 @@ func (c *AWSClient) SageMakerConn(ctx context.Context) *sagemaker_sdkv1.SageMake } func (c *AWSClient) SchedulerClient(ctx context.Context) *scheduler_sdkv2.Client { - return errs.Must(client[*scheduler_sdkv2.Client](ctx, c, names.Scheduler)) + return errs.Must(client[*scheduler_sdkv2.Client](ctx, c, names.Scheduler, make(map[string]any))) } func (c *AWSClient) SchemasConn(ctx context.Context) *schemas_sdkv1.Schemas { @@ -1041,11 +1041,11 @@ func (c *AWSClient) SecretsManagerConn(ctx context.Context) *secretsmanager_sdkv } func (c *AWSClient) SecurityHubClient(ctx context.Context) *securityhub_sdkv2.Client { - return errs.Must(client[*securityhub_sdkv2.Client](ctx, c, names.SecurityHub)) + return errs.Must(client[*securityhub_sdkv2.Client](ctx, c, names.SecurityHub, make(map[string]any))) } func (c *AWSClient) SecurityLakeClient(ctx context.Context) *securitylake_sdkv2.Client { - return errs.Must(client[*securitylake_sdkv2.Client](ctx, c, names.SecurityLake)) + return errs.Must(client[*securitylake_sdkv2.Client](ctx, c, names.SecurityLake, make(map[string]any))) } func (c *AWSClient) ServerlessRepoConn(ctx context.Context) *serverlessapplicationrepository_sdkv1.ServerlessApplicationRepository { @@ -1061,7 +1061,7 @@ func (c *AWSClient) ServiceDiscoveryConn(ctx context.Context) *servicediscovery_ } func (c *AWSClient) ServiceQuotasClient(ctx context.Context) *servicequotas_sdkv2.Client { - return errs.Must(client[*servicequotas_sdkv2.Client](ctx, c, names.ServiceQuotas)) + return errs.Must(client[*servicequotas_sdkv2.Client](ctx, c, names.ServiceQuotas, make(map[string]any))) } func (c *AWSClient) ShieldConn(ctx context.Context) *shield_sdkv1.Shield { @@ -1069,7 +1069,7 @@ func (c *AWSClient) ShieldConn(ctx context.Context) *shield_sdkv1.Shield { } func (c *AWSClient) SignerClient(ctx context.Context) *signer_sdkv2.Client { - return errs.Must(client[*signer_sdkv2.Client](ctx, c, 
names.Signer)) + return errs.Must(client[*signer_sdkv2.Client](ctx, c, names.Signer, make(map[string]any))) } func (c *AWSClient) SimpleDBConn(ctx context.Context) *simpledb_sdkv1.SimpleDB { @@ -1085,11 +1085,11 @@ func (c *AWSClient) SyntheticsConn(ctx context.Context) *synthetics_sdkv1.Synthe } func (c *AWSClient) TimestreamWriteClient(ctx context.Context) *timestreamwrite_sdkv2.Client { - return errs.Must(client[*timestreamwrite_sdkv2.Client](ctx, c, names.TimestreamWrite)) + return errs.Must(client[*timestreamwrite_sdkv2.Client](ctx, c, names.TimestreamWrite, make(map[string]any))) } func (c *AWSClient) TranscribeClient(ctx context.Context) *transcribe_sdkv2.Client { - return errs.Must(client[*transcribe_sdkv2.Client](ctx, c, names.Transcribe)) + return errs.Must(client[*transcribe_sdkv2.Client](ctx, c, names.Transcribe, make(map[string]any))) } func (c *AWSClient) TransferConn(ctx context.Context) *transfer_sdkv1.Transfer { @@ -1097,11 +1097,11 @@ func (c *AWSClient) TransferConn(ctx context.Context) *transfer_sdkv1.Transfer { } func (c *AWSClient) VPCLatticeClient(ctx context.Context) *vpclattice_sdkv2.Client { - return errs.Must(client[*vpclattice_sdkv2.Client](ctx, c, names.VPCLattice)) + return errs.Must(client[*vpclattice_sdkv2.Client](ctx, c, names.VPCLattice, make(map[string]any))) } func (c *AWSClient) VerifiedPermissionsClient(ctx context.Context) *verifiedpermissions_sdkv2.Client { - return errs.Must(client[*verifiedpermissions_sdkv2.Client](ctx, c, names.VerifiedPermissions)) + return errs.Must(client[*verifiedpermissions_sdkv2.Client](ctx, c, names.VerifiedPermissions, make(map[string]any))) } func (c *AWSClient) WAFConn(ctx context.Context) *waf_sdkv1.WAF { @@ -1121,9 +1121,9 @@ func (c *AWSClient) WorkLinkConn(ctx context.Context) *worklink_sdkv1.WorkLink { } func (c *AWSClient) WorkSpacesClient(ctx context.Context) *workspaces_sdkv2.Client { - return errs.Must(client[*workspaces_sdkv2.Client](ctx, c, names.WorkSpaces)) + return 
errs.Must(client[*workspaces_sdkv2.Client](ctx, c, names.WorkSpaces, make(map[string]any))) } func (c *AWSClient) XRayClient(ctx context.Context) *xray_sdkv2.Client { - return errs.Must(client[*xray_sdkv2.Client](ctx, c, names.XRay)) + return errs.Must(client[*xray_sdkv2.Client](ctx, c, names.XRay, make(map[string]any))) } diff --git a/internal/generate/awsclient/file.tmpl b/internal/generate/awsclient/file.tmpl index ec27a41f538..359233aeb7c 100644 --- a/internal/generate/awsclient/file.tmpl +++ b/internal/generate/awsclient/file.tmpl @@ -25,7 +25,7 @@ func (c *AWSClient) {{ .ProviderNameUpper }}Conn(ctx context.Context) *{{ .GoV1P {{if eq .SDKVersion "2" "1,2" }} func (c *AWSClient) {{ .ProviderNameUpper }}Client(ctx context.Context) *{{ .GoV2Package }}_sdkv2.Client { - return errs.Must(client[*{{ .GoV2Package }}_sdkv2.Client](ctx, c, names.{{ .ProviderNameUpper }})) + return errs.Must(client[*{{ .GoV2Package }}_sdkv2.Client](ctx, c, names.{{ .ProviderNameUpper }}, make(map[string]any))) } {{- end }} {{ end }} From 6bba6eefc3a8c2d024a1b37fb005f6c33fbbcbf0 Mon Sep 17 00:00:00 2001 From: hsiam261 Date: Wed, 13 Dec 2023 01:36:39 +0600 Subject: [PATCH 085/438] add better logging when dynamodb import fails --- internal/service/dynamodb/wait.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/service/dynamodb/wait.go b/internal/service/dynamodb/wait.go index b83d49dca5c..5a1b44a571f 100644 --- a/internal/service/dynamodb/wait.go +++ b/internal/service/dynamodb/wait.go @@ -87,6 +87,10 @@ func waitImportComplete(ctx context.Context, conn *dynamodb.DynamoDB, importArn outputRaw, err := stateConf.WaitForStateContext(ctx) + if err != nil { + err = fmt.Errorf("ImportArn %q : %w", importArn, err) + } + if output, ok := outputRaw.(*dynamodb.DescribeImportOutput); ok { return output, err } From ec7677815285462695519b54db307ebbd80bc700 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Dec 2023 14:39:50 -0500 Subject: [PATCH 086/438] Extras overwrite 
per-service defaults. --- internal/conns/awsclient.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/internal/conns/awsclient.go b/internal/conns/awsclient.go index 8170349f767..f15a24c7132 100644 --- a/internal/conns/awsclient.go +++ b/internal/conns/awsclient.go @@ -19,6 +19,7 @@ import ( baselogging "github.com/hashicorp/aws-sdk-go-base/v2/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/names" + "golang.org/x/exp/maps" ) type AWSClient struct { @@ -214,7 +215,9 @@ func conn[T any](ctx context.Context, c *AWSClient, servicePackageName string, e return zero, fmt.Errorf("no AWS SDK v1 API client factory: %s", servicePackageName) } - conn, err := v.NewConn(ctx, c.apiClientConfig(servicePackageName)) + config := c.apiClientConfig(servicePackageName) + maps.Copy(config, extra) // Extras overwrite per-service defaults. + conn, err := v.NewConn(ctx, config) if err != nil { var zero T return zero, err @@ -263,7 +266,9 @@ func client[T any](ctx context.Context, c *AWSClient, servicePackageName string, return zero, fmt.Errorf("no AWS SDK v2 API client factory: %s", servicePackageName) } - client, err := v.NewClient(ctx, c.apiClientConfig(servicePackageName)) + config := c.apiClientConfig(servicePackageName) + maps.Copy(config, extra) // Extras overwrite per-service defaults. + client, err := v.NewClient(ctx, config) if err != nil { var zero T return zero, err From b2254e1d44fd381a974568b0657cf77e2cad10d9 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Dec 2023 14:47:16 -0500 Subject: [PATCH 087/438] Only default service client is cached. 
--- internal/conns/awsclient.go | 43 +++++++++++++++++++++++++------------ 1 file changed, 29 insertions(+), 14 deletions(-) diff --git a/internal/conns/awsclient.go b/internal/conns/awsclient.go index f15a24c7132..dd43d75d46a 100644 --- a/internal/conns/awsclient.go +++ b/internal/conns/awsclient.go @@ -78,6 +78,9 @@ func (client *AWSClient) RegionalHostname(prefix string) string { // This client differs from the standard S3 API client only in us-east-1 if the global S3 endpoint is used. // In that case the returned client uses the regional S3 endpoint. func (client *AWSClient) S3ExpressClient(ctx context.Context) *s3_sdkv2.Client { + if s3Client := client.S3Client(ctx); s3Client.Options().Region != names.GlobalRegionID { + + } return client.s3ExpressClient } @@ -192,12 +195,15 @@ func conn[T any](ctx context.Context, c *AWSClient, servicePackageName string, e c.lock.Lock() defer c.lock.Unlock() - if raw, ok := c.conns[servicePackageName]; ok { - if conn, ok := raw.(T); ok { - return conn, nil - } else { - var zero T - return zero, fmt.Errorf("AWS SDK v1 API client (%s): %T, want %T", servicePackageName, raw, zero) + // Default service client is cached. + if len(extra) == 0 { + if raw, ok := c.conns[servicePackageName]; ok { + if conn, ok := raw.(T); ok { + return conn, nil + } else { + var zero T + return zero, fmt.Errorf("AWS SDK v1 API client (%s): %T, want %T", servicePackageName, raw, zero) + } } } @@ -233,7 +239,10 @@ func conn[T any](ctx context.Context, c *AWSClient, servicePackageName string, e } } - c.conns[servicePackageName] = conn + // Default service client is cached. 
+ if len(extra) == 0 { + c.conns[servicePackageName] = conn + } return conn, nil } @@ -243,12 +252,15 @@ func client[T any](ctx context.Context, c *AWSClient, servicePackageName string, c.lock.Lock() defer c.lock.Unlock() - if raw, ok := c.clients[servicePackageName]; ok { - if client, ok := raw.(T); ok { - return client, nil - } else { - var zero T - return zero, fmt.Errorf("AWS SDK v2 API client (%s): %T, want %T", servicePackageName, raw, zero) + // Default service client is cached. + if len(extra) == 0 { + if raw, ok := c.clients[servicePackageName]; ok { + if client, ok := raw.(T); ok { + return client, nil + } else { + var zero T + return zero, fmt.Errorf("AWS SDK v2 API client (%s): %T, want %T", servicePackageName, raw, zero) + } } } @@ -276,7 +288,10 @@ func client[T any](ctx context.Context, c *AWSClient, servicePackageName string, // All customization for AWS SDK for Go v2 API clients must be done during construction. - c.clients[servicePackageName] = client + // Default service client is cached. + if len(extra) == 0 { + c.clients[servicePackageName] = client + } return client, nil } From e5734917733378336de8598b7e8cde4ea56da2b4 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Dec 2023 15:10:58 -0500 Subject: [PATCH 088/438] Implement 'AWSClient.S3ExpressClient()'. 
--- internal/conns/awsclient.go | 99 +++++++++++++++++++------------------ 1 file changed, 50 insertions(+), 49 deletions(-) diff --git a/internal/conns/awsclient.go b/internal/conns/awsclient.go index dd43d75d46a..080b6b8427a 100644 --- a/internal/conns/awsclient.go +++ b/internal/conns/awsclient.go @@ -17,6 +17,7 @@ import ( apigatewayv2_sdkv1 "github.com/aws/aws-sdk-go/service/apigatewayv2" mediaconvert_sdkv1 "github.com/aws/aws-sdk-go/service/mediaconvert" baselogging "github.com/hashicorp/aws-sdk-go-base/v2/logging" + "github.com/hashicorp/terraform-provider-aws/internal/errs" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/names" "golang.org/x/exp/maps" @@ -49,96 +50,95 @@ type AWSClient struct { } // CredentialsProvider returns the AWS SDK for Go v2 credentials provider. -func (client *AWSClient) CredentialsProvider() aws_sdkv2.CredentialsProvider { - if client.awsConfig == nil { +func (c *AWSClient) CredentialsProvider() aws_sdkv2.CredentialsProvider { + if c.awsConfig == nil { return nil } - return client.awsConfig.Credentials + return c.awsConfig.Credentials } -func (client *AWSClient) AwsConfig() aws_sdkv2.Config { // nosemgrep:ci.aws-in-func-name - return client.awsConfig.Copy() +func (c *AWSClient) AwsConfig() aws_sdkv2.Config { // nosemgrep:ci.aws-in-func-name + return c.awsConfig.Copy() } // PartitionHostname returns a hostname with the provider domain suffix for the partition // e.g. PREFIX.amazonaws.com // The prefix should not contain a trailing period. -func (client *AWSClient) PartitionHostname(prefix string) string { - return fmt.Sprintf("%s.%s", prefix, client.DNSSuffix) +func (c *AWSClient) PartitionHostname(prefix string) string { + return fmt.Sprintf("%s.%s", prefix, c.DNSSuffix) } // RegionalHostname returns a hostname with the provider domain suffix for the region and partition // e.g. PREFIX.us-west-2.amazonaws.com // The prefix should not contain a trailing period. 
-func (client *AWSClient) RegionalHostname(prefix string) string { - return fmt.Sprintf("%s.%s.%s", prefix, client.Region, client.DNSSuffix) +func (c *AWSClient) RegionalHostname(prefix string) string { + return fmt.Sprintf("%s.%s.%s", prefix, c.Region, c.DNSSuffix) } // S3ExpressClient returns an S3 API client suitable for use with S3 Express (directory buckets). // This client differs from the standard S3 API client only in us-east-1 if the global S3 endpoint is used. // In that case the returned client uses the regional S3 endpoint. -func (client *AWSClient) S3ExpressClient(ctx context.Context) *s3_sdkv2.Client { - if s3Client := client.S3Client(ctx); s3Client.Options().Region != names.GlobalRegionID { - - } - return client.s3ExpressClient +func (c *AWSClient) S3ExpressClient(ctx context.Context) *s3_sdkv2.Client { + return errs.Must(client[*s3_sdkv2.Client](ctx, c, names.S3, map[string]any{ + "s3_us_east_1_regional_endpoint": endpoints_sdkv1.RegionalS3UsEast1Endpoint, + })) } // S3UsePathStyle returns the s3_force_path_style provider configuration value. -func (client *AWSClient) S3UsePathStyle() bool { - return client.s3UsePathStyle +func (c *AWSClient) S3UsePathStyle() bool { + return c.s3UsePathStyle } // SetHTTPClient sets the http.Client used for AWS API calls. // To have effect it must be called before the AWS SDK v1 Session is created. -func (client *AWSClient) SetHTTPClient(httpClient *http.Client) { - if client.Session == nil { - client.httpClient = httpClient +func (c *AWSClient) SetHTTPClient(httpClient *http.Client) { + if c.Session == nil { + c.httpClient = httpClient } } // HTTPClient returns the http.Client used for AWS API calls. -func (client *AWSClient) HTTPClient() *http.Client { - return client.httpClient +func (c *AWSClient) HTTPClient() *http.Client { + return c.httpClient } // RegisterLogger places the configured logger into Context so it can be used via `tflog`. 
-func (client *AWSClient) RegisterLogger(ctx context.Context) context.Context { - return baselogging.RegisterLogger(ctx, client.logger) +func (c *AWSClient) RegisterLogger(ctx context.Context) context.Context { + return baselogging.RegisterLogger(ctx, c.logger) } // APIGatewayInvokeURL returns the Amazon API Gateway (REST APIs) invoke URL for the configured AWS Region. // See https://docs.aws.amazon.com/apigateway/latest/developerguide/how-to-call-api.html. -func (client *AWSClient) APIGatewayInvokeURL(restAPIID, stageName string) string { - return fmt.Sprintf("https://%s/%s", client.RegionalHostname(fmt.Sprintf("%s.execute-api", restAPIID)), stageName) +func (c *AWSClient) APIGatewayInvokeURL(restAPIID, stageName string) string { + return fmt.Sprintf("https://%s/%s", c.RegionalHostname(fmt.Sprintf("%s.execute-api", restAPIID)), stageName) } // APIGatewayV2InvokeURL returns the Amazon API Gateway v2 (WebSocket & HTTP APIs) invoke URL for the configured AWS Region. // See https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-publish.html and // https://docs.aws.amazon.com/apigateway/latest/developerguide/apigateway-set-up-websocket-deployment.html. 
-func (client *AWSClient) APIGatewayV2InvokeURL(protocolType, apiID, stageName string) string { +func (c *AWSClient) APIGatewayV2InvokeURL(protocolType, apiID, stageName string) string { if protocolType == apigatewayv2_sdkv1.ProtocolTypeWebsocket { - return fmt.Sprintf("wss://%s/%s", client.RegionalHostname(fmt.Sprintf("%s.execute-api", apiID)), stageName) + return fmt.Sprintf("wss://%s/%s", c.RegionalHostname(fmt.Sprintf("%s.execute-api", apiID)), stageName) } if stageName == "$default" { - return fmt.Sprintf("https://%s/", client.RegionalHostname(fmt.Sprintf("%s.execute-api", apiID))) + return fmt.Sprintf("https://%s/", c.RegionalHostname(fmt.Sprintf("%s.execute-api", apiID))) } - return fmt.Sprintf("https://%s/%s", client.RegionalHostname(fmt.Sprintf("%s.execute-api", apiID)), stageName) + return fmt.Sprintf("https://%s/%s", c.RegionalHostname(fmt.Sprintf("%s.execute-api", apiID)), stageName) } // CloudFrontDistributionHostedZoneID returns the Route 53 hosted zone ID // for Amazon CloudFront distributions in the configured AWS partition. -func (client *AWSClient) CloudFrontDistributionHostedZoneID() string { - if client.Partition == endpoints_sdkv1.AwsCnPartitionID { +func (c *AWSClient) CloudFrontDistributionHostedZoneID() string { + if c.Partition == endpoints_sdkv1.AwsCnPartitionID { return "Z3RFFRIM2A3IF5" // See https://docs.amazonaws.cn/en_us/aws/latest/userguide/route53.html } return "Z2FDTNDATAQYW2" // See https://docs.aws.amazon.com/Route53/latest/APIReference/API_AliasTarget.html#Route53-Type-AliasTarget-HostedZoneId } // DefaultKMSKeyPolicy returns the default policy for KMS keys in the configured AWS partition. 
-func (client *AWSClient) DefaultKMSKeyPolicy() string { +func (c *AWSClient) DefaultKMSKeyPolicy() string { return fmt.Sprintf(` { "Id": "default", @@ -155,36 +155,36 @@ func (client *AWSClient) DefaultKMSKeyPolicy() string { } ] } -`, client.Partition, client.AccountID) +`, c.Partition, c.AccountID) } // GlobalAcceleratorHostedZoneID returns the Route 53 hosted zone ID // for AWS Global Accelerator accelerators in the configured AWS partition. -func (client *AWSClient) GlobalAcceleratorHostedZoneID() string { +func (c *AWSClient) GlobalAcceleratorHostedZoneID() string { return "Z2BJ6XQ5FK7U4H" // See https://docs.aws.amazon.com/general/latest/gr/global_accelerator.html#global_accelerator_region } // apiClientConfig returns the AWS API client configuration parameters for the specified service. -func (client *AWSClient) apiClientConfig(servicePackageName string) map[string]any { +func (c *AWSClient) apiClientConfig(servicePackageName string) map[string]any { m := map[string]any{ - "aws_sdkv2_config": client.awsConfig, - "endpoint": client.endpoints[servicePackageName], - "partition": client.Partition, - "session": client.Session, + "aws_sdkv2_config": c.awsConfig, + "endpoint": c.endpoints[servicePackageName], + "partition": c.Partition, + "session": c.Session, } switch servicePackageName { case names.S3: - m["s3_use_path_style"] = client.s3UsePathStyle + m["s3_use_path_style"] = c.s3UsePathStyle // AWS SDK for Go v2 does not use the AWS_S3_US_EAST_1_REGIONAL_ENDPOINT environment variable during configuration. // For compatibility, read it now. 
- if client.s3UsEast1RegionalEndpoint == endpoints_sdkv1.UnsetS3UsEast1Endpoint { + if c.s3UsEast1RegionalEndpoint == endpoints_sdkv1.UnsetS3UsEast1Endpoint { if v, err := endpoints_sdkv1.GetS3UsEast1RegionalEndpoint(os.Getenv("AWS_S3_US_EAST_1_REGIONAL_ENDPOINT")); err == nil { - client.s3UsEast1RegionalEndpoint = v + c.s3UsEast1RegionalEndpoint = v } } - m["s3_us_east_1_regional_endpoint"] = client.s3UsEast1RegionalEndpoint + m["s3_us_east_1_regional_endpoint"] = c.s3UsEast1RegionalEndpoint case names.STS: - m["sts_region"] = client.stsRegion + m["sts_region"] = c.stsRegion } return m @@ -195,8 +195,9 @@ func conn[T any](ctx context.Context, c *AWSClient, servicePackageName string, e c.lock.Lock() defer c.lock.Unlock() + isDefault := len(extra) == 0 // Default service client is cached. - if len(extra) == 0 { + if isDefault { if raw, ok := c.conns[servicePackageName]; ok { if conn, ok := raw.(T); ok { return conn, nil @@ -240,7 +241,7 @@ func conn[T any](ctx context.Context, c *AWSClient, servicePackageName string, e } // Default service client is cached. - if len(extra) == 0 { + if isDefault { c.conns[servicePackageName] = conn } @@ -252,8 +253,9 @@ func client[T any](ctx context.Context, c *AWSClient, servicePackageName string, c.lock.Lock() defer c.lock.Unlock() + isDefault := len(extra) == 0 // Default service client is cached. - if len(extra) == 0 { + if isDefault { if raw, ok := c.clients[servicePackageName]; ok { if client, ok := raw.(T); ok { return client, nil @@ -288,8 +290,7 @@ func client[T any](ctx context.Context, c *AWSClient, servicePackageName string, // All customization for AWS SDK for Go v2 API clients must be done during construction. - // Default service client is cached. 
- if len(extra) == 0 { + if isDefault { c.clients[servicePackageName] = client } From bb43b5554cc5626ae33ecae644075a3119546e6a Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Dec 2023 15:13:38 -0500 Subject: [PATCH 089/438] s3: Replace 'useRegionalEndpointInUSEast1' with 'S3ExpressClient()'. --- internal/service/s3/directory_bucket.go | 12 ++++++------ internal/service/s3/directory_bucket_test.go | 8 ++++---- internal/service/s3/exports_test.go | 1 - internal/service/s3/service_package.go | 7 ------- 4 files changed, 10 insertions(+), 18 deletions(-) diff --git a/internal/service/s3/directory_bucket.go b/internal/service/s3/directory_bucket.go index 9b4dc02dc70..1b37ed97ab3 100644 --- a/internal/service/s3/directory_bucket.go +++ b/internal/service/s3/directory_bucket.go @@ -143,7 +143,7 @@ func (r *directoryBucketResource) Create(ctx context.Context, request resource.C return } - conn := r.Meta().S3Client(ctx) + conn := r.Meta().S3ExpressClient(ctx) input := &s3.CreateBucketInput{ Bucket: flex.StringFromFramework(ctx, data.Bucket), @@ -159,7 +159,7 @@ func (r *directoryBucketResource) Create(ctx context.Context, request resource.C }, } - _, err := conn.CreateBucket(ctx, input, useRegionalEndpointInUSEast1) + _, err := conn.CreateBucket(ctx, input) if err != nil { response.Diagnostics.AddError(fmt.Sprintf("creating S3 Directory Bucket (%s)", data.Bucket.ValueString()), err.Error()) @@ -189,9 +189,9 @@ func (r *directoryBucketResource) Read(ctx context.Context, request resource.Rea return } - conn := r.Meta().S3Client(ctx) + conn := r.Meta().S3ExpressClient(ctx) - err := findBucket(ctx, conn, data.Bucket.ValueString(), useRegionalEndpointInUSEast1) + err := findBucket(ctx, conn, data.Bucket.ValueString()) if tfresource.NotFound(err) { response.Diagnostics.Append(fwdiag.NewResourceNotFoundWarningDiagnostic(err)) @@ -249,11 +249,11 @@ func (r *directoryBucketResource) Delete(ctx context.Context, request resource.D return } - conn := r.Meta().S3Client(ctx) + 
conn := r.Meta().S3ExpressClient(ctx) _, err := conn.DeleteBucket(ctx, &s3.DeleteBucketInput{ Bucket: flex.StringFromFramework(ctx, data.ID), - }, useRegionalEndpointInUSEast1) + }) if tfawserr.ErrCodeEquals(err, errCodeBucketNotEmpty) { if data.ForceDestroy.ValueBool() { diff --git a/internal/service/s3/directory_bucket_test.go b/internal/service/s3/directory_bucket_test.go index 14ff2a70924..3f6085dddc9 100644 --- a/internal/service/s3/directory_bucket_test.go +++ b/internal/service/s3/directory_bucket_test.go @@ -99,14 +99,14 @@ func TestAccS3DirectoryBucket_forceDestroy(t *testing.T) { func testAccCheckDirectoryBucketDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) for _, rs := range s.RootModule().Resources { if rs.Type != "aws_s3_directory_bucket" { continue } - err := tfs3.FindBucket(ctx, conn, rs.Primary.ID, tfs3.UseRegionalEndpointInUSEast1) + err := tfs3.FindBucket(ctx, conn, rs.Primary.ID) if tfresource.NotFound(err) { continue @@ -130,9 +130,9 @@ func testAccCheckDirectoryBucketExists(ctx context.Context, n string) resource.T return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) - return tfs3.FindBucket(ctx, conn, rs.Primary.ID, tfs3.UseRegionalEndpointInUSEast1) + return tfs3.FindBucket(ctx, conn, rs.Primary.ID) } } diff --git a/internal/service/s3/exports_test.go b/internal/service/s3/exports_test.go index cb4f49bc65f..a8073d65d0d 100644 --- a/internal/service/s3/exports_test.go +++ b/internal/service/s3/exports_test.go @@ -31,7 +31,6 @@ var ( FindReplicationConfiguration = findReplicationConfiguration FindServerSideEncryptionConfiguration = findServerSideEncryptionConfiguration SDKv1CompatibleCleanKey = sdkv1CompatibleCleanKey - 
UseRegionalEndpointInUSEast1 = useRegionalEndpointInUSEast1 ErrCodeNoSuchCORSConfiguration = errCodeNoSuchCORSConfiguration LifecycleRuleStatusDisabled = lifecycleRuleStatusDisabled diff --git a/internal/service/s3/service_package.go b/internal/service/s3/service_package.go index 690936383f2..dc1ec5a6408 100644 --- a/internal/service/s3/service_package.go +++ b/internal/service/s3/service_package.go @@ -68,10 +68,3 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( })) }), nil } - -// Functional options to force the regional endpoint in us-east-1 if the client is configured to use the global endpoint. -func useRegionalEndpointInUSEast1(o *s3_sdkv2.Options) { - if o.Region == names.GlobalRegionID { - o.Region = names.USEast1RegionID - } -} From ff8b163a9c0f5b147b62e01de3f1be55fe6a6e84 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Dec 2023 15:19:22 -0500 Subject: [PATCH 090/438] Cache 'AWSClient.s3ExpressClient'. --- internal/conns/awsclient.go | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/internal/conns/awsclient.go b/internal/conns/awsclient.go index 080b6b8427a..369be48ac70 100644 --- a/internal/conns/awsclient.go +++ b/internal/conns/awsclient.go @@ -79,9 +79,16 @@ func (c *AWSClient) RegionalHostname(prefix string) string { // This client differs from the standard S3 API client only in us-east-1 if the global S3 endpoint is used. // In that case the returned client uses the regional S3 endpoint. func (c *AWSClient) S3ExpressClient(ctx context.Context) *s3_sdkv2.Client { - return errs.Must(client[*s3_sdkv2.Client](ctx, c, names.S3, map[string]any{ - "s3_us_east_1_regional_endpoint": endpoints_sdkv1.RegionalS3UsEast1Endpoint, - })) + c.lock.Lock() // OK since a non-default client is created. 
+ defer c.lock.Unlock() + + if c.s3ExpressClient == nil { + c.s3ExpressClient = errs.Must(client[*s3_sdkv2.Client](ctx, c, names.S3, map[string]any{ + "s3_us_east_1_regional_endpoint": endpoints_sdkv1.RegionalS3UsEast1Endpoint, + })) + } + + return c.s3ExpressClient } // S3UsePathStyle returns the s3_force_path_style provider configuration value. @@ -191,13 +198,14 @@ func (c *AWSClient) apiClientConfig(servicePackageName string) map[string]any { } // conn returns the AWS SDK for Go v1 API client for the specified service. +// The default service client (`extra` is empty) is cached. In this case the AWSClient lock is held. func conn[T any](ctx context.Context, c *AWSClient, servicePackageName string, extra map[string]any) (T, error) { - c.lock.Lock() - defer c.lock.Unlock() - isDefault := len(extra) == 0 // Default service client is cached. if isDefault { + c.lock.Lock() + defer c.lock.Unlock() // Runs at function exit, NOT block. + if raw, ok := c.conns[servicePackageName]; ok { if conn, ok := raw.(T); ok { return conn, nil @@ -249,13 +257,14 @@ func conn[T any](ctx context.Context, c *AWSClient, servicePackageName string, e } // client returns the AWS SDK for Go v2 API client for the specified service. +// The default service client (`extra` is empty) is cached. In this case the AWSClient lock is held. func client[T any](ctx context.Context, c *AWSClient, servicePackageName string, extra map[string]any) (T, error) { - c.lock.Lock() - defer c.lock.Unlock() - isDefault := len(extra) == 0 // Default service client is cached. if isDefault { + c.lock.Lock() + defer c.lock.Unlock() // Runs at function exit, NOT block. + if raw, ok := c.clients[servicePackageName]; ok { if client, ok := raw.(T); ok { return client, nil From 3bc2ab627174b16ee2c40594a0811bd7293cd51b Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Dec 2023 15:29:09 -0500 Subject: [PATCH 091/438] Add CHANGELOG entry. 
--- .changelog/#####.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/#####.txt diff --git a/.changelog/#####.txt b/.changelog/#####.txt new file mode 100644 index 00000000000..fc03926ddf7 --- /dev/null +++ b/.changelog/#####.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_s3_directory_bucket: Fix `no such host` errors in `us-east-1` +``` \ No newline at end of file From b11170bbb7690804eb26b7c0e6558b7162a78b4b Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Dec 2023 15:32:36 -0500 Subject: [PATCH 092/438] d/aws_s3_directory_buckets: Use 'S3ExpressClient'. --- .changelog/#####.txt | 4 ++++ internal/service/s3/directory_buckets_data_source.go | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.changelog/#####.txt b/.changelog/#####.txt index fc03926ddf7..e76252282fe 100644 --- a/.changelog/#####.txt +++ b/.changelog/#####.txt @@ -1,3 +1,7 @@ ```release-note:bug resource/aws_s3_directory_bucket: Fix `no such host` errors in `us-east-1` +``` + +```release-note:bug +data-source/aws_s3_directory_buckets: Fix `no such host` errors in `us-east-1` ``` \ No newline at end of file diff --git a/internal/service/s3/directory_buckets_data_source.go b/internal/service/s3/directory_buckets_data_source.go index 67d408d919d..fbb819a6538 100644 --- a/internal/service/s3/directory_buckets_data_source.go +++ b/internal/service/s3/directory_buckets_data_source.go @@ -58,7 +58,7 @@ func (d *directoryBucketsDataSource) Read(ctx context.Context, request datasourc return } - conn := d.Meta().S3Client(ctx) + conn := d.Meta().S3ExpressClient(ctx) input := &s3.ListDirectoryBucketsInput{} var buckets []string From bae67c862edef53fa425e97abc9cf09b8e969af8 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Dec 2023 15:59:26 -0500 Subject: [PATCH 093/438] r/aws_s3_bucket_policy: Use 'S3ExpressClient'. 
--- .changelog/#####.txt | 4 ++++ internal/service/s3/bucket_policy.go | 9 +++++++++ internal/service/s3/bucket_policy_test.go | 7 +++++++ internal/service/s3/exports_test.go | 1 + 4 files changed, 21 insertions(+) diff --git a/.changelog/#####.txt b/.changelog/#####.txt index e76252282fe..da93c6e393b 100644 --- a/.changelog/#####.txt +++ b/.changelog/#####.txt @@ -4,4 +4,8 @@ resource/aws_s3_directory_bucket: Fix `no such host` errors in `us-east-1` ```release-note:bug data-source/aws_s3_directory_buckets: Fix `no such host` errors in `us-east-1` +``` + +```release-note:bug +resource/aws_s3_bucket_policy: Fix `no such host` errors in `us-east-1` for directory buckets ``` \ No newline at end of file diff --git a/internal/service/s3/bucket_policy.go b/internal/service/s3/bucket_policy.go index 32c26742832..ab0d4516985 100644 --- a/internal/service/s3/bucket_policy.go +++ b/internal/service/s3/bucket_policy.go @@ -64,6 +64,9 @@ func resourceBucketPolicyPut(ctx context.Context, d *schema.ResourceData, meta i } bucket := d.Get("bucket").(string) + if isDirectoryBucket(bucket) { + conn = meta.(*conns.AWSClient).S3ExpressClient(ctx) + } input := &s3.PutBucketPolicyInput{ Bucket: aws.String(bucket), Policy: aws.String(policy), @@ -95,6 +98,9 @@ func resourceBucketPolicyPut(ctx context.Context, d *schema.ResourceData, meta i func resourceBucketPolicyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).S3Client(ctx) + if isDirectoryBucket(d.Id()) { + conn = meta.(*conns.AWSClient).S3ExpressClient(ctx) + } policy, err := findBucketPolicy(ctx, conn, d.Id()) @@ -122,6 +128,9 @@ func resourceBucketPolicyRead(ctx context.Context, d *schema.ResourceData, meta func resourceBucketPolicyDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).S3Client(ctx) + if isDirectoryBucket(d.Id()) { + conn = 
meta.(*conns.AWSClient).S3ExpressClient(ctx) + } log.Printf("[DEBUG] Deleting S3 Bucket Policy: %s", d.Id()) _, err := conn.DeleteBucketPolicy(ctx, &s3.DeleteBucketPolicyInput{ diff --git a/internal/service/s3/bucket_policy_test.go b/internal/service/s3/bucket_policy_test.go index d60f13b1a6e..49e403b35b4 100644 --- a/internal/service/s3/bucket_policy_test.go +++ b/internal/service/s3/bucket_policy_test.go @@ -460,6 +460,10 @@ func testAccCheckBucketPolicyDestroy(ctx context.Context) resource.TestCheckFunc continue } + if tfs3.IsDirectoryBucket(rs.Primary.ID) { + conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) + } + _, err := tfs3.FindBucketPolicy(ctx, conn, rs.Primary.ID) if tfresource.NotFound(err) { @@ -485,6 +489,9 @@ func testAccCheckBucketHasPolicy(ctx context.Context, n string, expectedPolicyTe } conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + if tfs3.IsDirectoryBucket(rs.Primary.ID) { + conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) + } policy, err := tfs3.FindBucketPolicy(ctx, conn, rs.Primary.ID) diff --git a/internal/service/s3/exports_test.go b/internal/service/s3/exports_test.go index a8073d65d0d..952663c63fb 100644 --- a/internal/service/s3/exports_test.go +++ b/internal/service/s3/exports_test.go @@ -30,6 +30,7 @@ var ( FindPublicAccessBlockConfiguration = findPublicAccessBlockConfiguration FindReplicationConfiguration = findReplicationConfiguration FindServerSideEncryptionConfiguration = findServerSideEncryptionConfiguration + IsDirectoryBucket = isDirectoryBucket SDKv1CompatibleCleanKey = sdkv1CompatibleCleanKey ErrCodeNoSuchCORSConfiguration = errCodeNoSuchCORSConfiguration From 41d42970ddb7a3f2a3e588af9ac663683885c048 Mon Sep 17 00:00:00 2001 From: hsiam261 Date: Wed, 13 Dec 2023 02:57:19 +0600 Subject: [PATCH 094/438] setId before throwing exception for dynamoImportFailure for tainting Otherwise the table would get created but state won't manage it which would then require manual 
intervention to fix. --- internal/service/dynamodb/table.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/service/dynamodb/table.go b/internal/service/dynamodb/table.go index c61acaf9369..bdbe09fbffe 100644 --- a/internal/service/dynamodb/table.go +++ b/internal/service/dynamodb/table.go @@ -624,6 +624,7 @@ func resourceTableCreate(ctx context.Context, d *schema.ResourceData, meta inter importArn := importTableOutput.(*dynamodb.ImportTableOutput).ImportTableDescription.ImportArn if _, err = waitImportComplete(ctx, conn, *importArn, d.Timeout(schema.TimeoutCreate)); err != nil { + d.SetId(tableName) return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionCreating, ResNameTable, d.Id(), err) } } else { From af5cc8a3163a2eff74045dc930e83f2fd2bb33b0 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Dec 2023 16:15:02 -0500 Subject: [PATCH 095/438] Optimize 'AWSClient.S3ExpressClient()'. --- internal/conns/awsclient.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/internal/conns/awsclient.go b/internal/conns/awsclient.go index 369be48ac70..aa9e51412cf 100644 --- a/internal/conns/awsclient.go +++ b/internal/conns/awsclient.go @@ -79,13 +79,19 @@ func (c *AWSClient) RegionalHostname(prefix string) string { // This client differs from the standard S3 API client only in us-east-1 if the global S3 endpoint is used. // In that case the returned client uses the regional S3 endpoint. func (c *AWSClient) S3ExpressClient(ctx context.Context) *s3_sdkv2.Client { + s3Client := c.S3Client(ctx) + c.lock.Lock() // OK since a non-default client is created. 
defer c.lock.Unlock() if c.s3ExpressClient == nil { - c.s3ExpressClient = errs.Must(client[*s3_sdkv2.Client](ctx, c, names.S3, map[string]any{ - "s3_us_east_1_regional_endpoint": endpoints_sdkv1.RegionalS3UsEast1Endpoint, - })) + if s3Client.Options().Region == names.GlobalRegionID { + c.s3ExpressClient = errs.Must(client[*s3_sdkv2.Client](ctx, c, names.S3, map[string]any{ + "s3_us_east_1_regional_endpoint": endpoints_sdkv1.RegionalS3UsEast1Endpoint, + })) + } else { + c.s3ExpressClient = s3Client + } } return c.s3ExpressClient From 728198d4b4aa91abf1f82ff37367ea6ef2d7cab3 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Dec 2023 16:27:21 -0500 Subject: [PATCH 096/438] r/aws_s3_object: Use 'S3ExpressClient'. --- .changelog/#####.txt | 4 ++++ internal/service/s3/object.go | 15 ++++++++++++++- internal/service/s3/object_test.go | 22 ++++++++++++++++++++++ 3 files changed, 40 insertions(+), 1 deletion(-) diff --git a/.changelog/#####.txt b/.changelog/#####.txt index da93c6e393b..4a885641bd8 100644 --- a/.changelog/#####.txt +++ b/.changelog/#####.txt @@ -8,4 +8,8 @@ data-source/aws_s3_directory_buckets: Fix `no such host` errors in `us-east-1` ```release-note:bug resource/aws_s3_bucket_policy: Fix `no such host` errors in `us-east-1` for directory buckets +``` + +```release-note:bug +resource/aws_s3_object: Fix `no such host` errors in `us-east-1` for directory buckets ``` \ No newline at end of file diff --git a/internal/service/s3/object.go b/internal/service/s3/object.go index b1a88a4f998..2f584184064 100644 --- a/internal/service/s3/object.go +++ b/internal/service/s3/object.go @@ -254,6 +254,9 @@ func resourceObjectRead(ctx context.Context, d *schema.ResourceData, meta interf conn := meta.(*conns.AWSClient).S3Client(ctx) bucket := d.Get("bucket").(string) + if isDirectoryBucket(bucket) { + conn = meta.(*conns.AWSClient).S3ExpressClient(ctx) + } key := sdkv1CompatibleCleanKey(d.Get("key").(string)) output, err := findObjectByBucketAndKey(ctx, conn, bucket, 
key, "", d.Get("checksum_algorithm").(string)) @@ -315,6 +318,9 @@ func resourceObjectUpdate(ctx context.Context, d *schema.ResourceData, meta inte conn := meta.(*conns.AWSClient).S3Client(ctx) bucket := d.Get("bucket").(string) + if isDirectoryBucket(bucket) { + conn = meta.(*conns.AWSClient).S3ExpressClient(ctx) + } key := sdkv1CompatibleCleanKey(d.Get("key").(string)) if d.HasChange("acl") { @@ -390,6 +396,9 @@ func resourceObjectDelete(ctx context.Context, d *schema.ResourceData, meta inte conn := meta.(*conns.AWSClient).S3Client(ctx) bucket := d.Get("bucket").(string) + if isDirectoryBucket(bucket) { + conn = meta.(*conns.AWSClient).S3ExpressClient(ctx) + } key := sdkv1CompatibleCleanKey(d.Get("key").(string)) var err error @@ -428,6 +437,10 @@ func resourceObjectImport(ctx context.Context, d *schema.ResourceData, meta inte func resourceObjectUpload(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).S3Client(ctx) + bucket := d.Get("bucket").(string) + if isDirectoryBucket(bucket) { + conn = meta.(*conns.AWSClient).S3ExpressClient(ctx) + } uploader := manager.NewUploader(conn) defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig tags := tftags.New(ctx, d.Get("tags").(map[string]interface{})) @@ -476,7 +489,7 @@ func resourceObjectUpload(ctx context.Context, d *schema.ResourceData, meta inte input := &s3.PutObjectInput{ Body: body, - Bucket: aws.String(d.Get("bucket").(string)), + Bucket: aws.String(bucket), Key: aws.String(sdkv1CompatibleCleanKey(d.Get("key").(string))), } diff --git a/internal/service/s3/object_test.go b/internal/service/s3/object_test.go index f1b8c0bebb0..db0cfbcfcb5 100644 --- a/internal/service/s3/object_test.go +++ b/internal/service/s3/object_test.go @@ -1852,6 +1852,10 @@ func testAccCheckObjectDestroy(ctx context.Context) resource.TestCheckFunc { continue } + if tfs3.IsDirectoryBucket(rs.Primary.Attributes["bucket"]) { + conn = 
acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) + } + _, err := tfs3.FindObjectByBucketAndKey(ctx, conn, rs.Primary.Attributes["bucket"], tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"]), rs.Primary.Attributes["etag"], rs.Primary.Attributes["checksum_algorithm"]) if tfresource.NotFound(err) { @@ -1877,6 +1881,9 @@ func testAccCheckObjectExists(ctx context.Context, n string, v *s3.GetObjectOutp } conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + if tfs3.IsDirectoryBucket(rs.Primary.Attributes["bucket"]) { + conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) + } input := &s3.GetObjectInput{ Bucket: aws.String(rs.Primary.Attributes["bucket"]), @@ -1916,6 +1923,9 @@ func testAccCheckObjectACL(ctx context.Context, n string, want []string) resourc return func(s *terraform.State) error { rs := s.RootModule().Resources[n] conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + if tfs3.IsDirectoryBucket(rs.Primary.Attributes["bucket"]) { + conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) + } output, err := conn.GetObjectAcl(ctx, &s3.GetObjectAclInput{ Bucket: aws.String(rs.Primary.Attributes["bucket"]), @@ -1944,6 +1954,9 @@ func testAccCheckObjectStorageClass(ctx context.Context, n, want string) resourc return func(s *terraform.State) error { rs := s.RootModule().Resources[n] conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + if tfs3.IsDirectoryBucket(rs.Primary.Attributes["bucket"]) { + conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) + } output, err := tfs3.FindObjectByBucketAndKey(ctx, conn, rs.Primary.Attributes["bucket"], tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"]), "", "") @@ -1970,6 +1983,9 @@ func testAccCheckObjectSSE(ctx context.Context, n, want string) resource.TestChe return func(s *terraform.State) error { rs := s.RootModule().Resources[n] conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + if 
tfs3.IsDirectoryBucket(rs.Primary.Attributes["bucket"]) { + conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) + } output, err := tfs3.FindObjectByBucketAndKey(ctx, conn, rs.Primary.Attributes["bucket"], tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"]), "", "") @@ -2005,6 +2021,9 @@ func testAccCheckObjectUpdateTags(ctx context.Context, n string, oldTags, newTag return func(s *terraform.State) error { rs := s.RootModule().Resources[n] conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + if tfs3.IsDirectoryBucket(rs.Primary.Attributes["bucket"]) { + conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) + } return tfs3.ObjectUpdateTags(ctx, conn, rs.Primary.Attributes["bucket"], tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"]), oldTags, newTags) } @@ -2014,6 +2033,9 @@ func testAccCheckObjectCheckTags(ctx context.Context, n string, expectedTags map return func(s *terraform.State) error { rs := s.RootModule().Resources[n] conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + if tfs3.IsDirectoryBucket(rs.Primary.Attributes["bucket"]) { + conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) + } got, err := tfs3.ObjectListTags(ctx, conn, rs.Primary.Attributes["bucket"], tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"])) if err != nil { From 42049a3b952b737d9718c2190ba1e54a3856ee13 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Dec 2023 16:31:11 -0500 Subject: [PATCH 097/438] d/aws_s3_object: Use 'S3ExpressClient'. 
--- .changelog/#####.txt | 4 ++++ internal/service/s3/object_data_source.go | 3 +++ 2 files changed, 7 insertions(+) diff --git a/.changelog/#####.txt b/.changelog/#####.txt index 4a885641bd8..7a7d61884fe 100644 --- a/.changelog/#####.txt +++ b/.changelog/#####.txt @@ -12,4 +12,8 @@ resource/aws_s3_bucket_policy: Fix `no such host` errors in `us-east-1` for dire ```release-note:bug resource/aws_s3_object: Fix `no such host` errors in `us-east-1` for directory buckets +``` + +```release-note:bug +data-source/aws_s3_object: Fix `no such host` errors in `us-east-1` for directory buckets ``` \ No newline at end of file diff --git a/internal/service/s3/object_data_source.go b/internal/service/s3/object_data_source.go index 7d1830a703d..2e58ecf97ec 100644 --- a/internal/service/s3/object_data_source.go +++ b/internal/service/s3/object_data_source.go @@ -160,6 +160,9 @@ func dataSourceObjectRead(ctx context.Context, d *schema.ResourceData, meta inte ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig bucket := d.Get("bucket").(string) + if isDirectoryBucket(bucket) { + conn = meta.(*conns.AWSClient).S3ExpressClient(ctx) + } key := sdkv1CompatibleCleanKey(d.Get("key").(string)) input := &s3.HeadObjectInput{ Bucket: aws.String(bucket), From 18570a514c2929ddbe3eed411b2b09b1455ea659 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Dec 2023 16:38:46 -0500 Subject: [PATCH 098/438] r/aws_s3_object_copy: Use 'S3ExpressClient'. 
--- .changelog/#####.txt | 4 ++++ internal/service/s3/object_copy.go | 12 +++++++++++- internal/service/s3/object_copy_test.go | 10 ++++++++-- 3 files changed, 23 insertions(+), 3 deletions(-) diff --git a/.changelog/#####.txt b/.changelog/#####.txt index 7a7d61884fe..85a3443a5d5 100644 --- a/.changelog/#####.txt +++ b/.changelog/#####.txt @@ -16,4 +16,8 @@ resource/aws_s3_object: Fix `no such host` errors in `us-east-1` for directory b ```release-note:bug data-source/aws_s3_object: Fix `no such host` errors in `us-east-1` for directory buckets +``` + +```release-note:bug +resource/aws_s3_object_copy: Fix `no such host` errors in `us-east-1` for directory buckets ``` \ No newline at end of file diff --git a/internal/service/s3/object_copy.go b/internal/service/s3/object_copy.go index 4e5e2f99b0b..d38344ed3f0 100644 --- a/internal/service/s3/object_copy.go +++ b/internal/service/s3/object_copy.go @@ -334,6 +334,9 @@ func resourceObjectCopyRead(ctx context.Context, d *schema.ResourceData, meta in conn := meta.(*conns.AWSClient).S3Client(ctx) bucket := d.Get("bucket").(string) + if isDirectoryBucket(bucket) { + conn = meta.(*conns.AWSClient).S3ExpressClient(ctx) + } key := sdkv1CompatibleCleanKey(d.Get("key").(string)) output, err := findObjectByBucketAndKey(ctx, conn, bucket, key, "", d.Get("checksum_algorithm").(string)) @@ -455,6 +458,9 @@ func resourceObjectCopyDelete(ctx context.Context, d *schema.ResourceData, meta conn := meta.(*conns.AWSClient).S3Client(ctx) bucket := d.Get("bucket").(string) + if isDirectoryBucket(bucket) { + conn = meta.(*conns.AWSClient).S3ExpressClient(ctx) + } key := sdkv1CompatibleCleanKey(d.Get("key").(string)) var err error @@ -473,11 +479,15 @@ func resourceObjectCopyDelete(ctx context.Context, d *schema.ResourceData, meta func resourceObjectCopyDoCopy(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).S3Client(ctx) + bucket := 
d.Get("bucket").(string) + if isDirectoryBucket(bucket) { + conn = meta.(*conns.AWSClient).S3ExpressClient(ctx) + } defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig tags := defaultTagsConfig.MergeTags(tftags.New(ctx, d.Get("tags").(map[string]interface{}))) input := &s3.CopyObjectInput{ - Bucket: aws.String(d.Get("bucket").(string)), + Bucket: aws.String(bucket), CopySource: aws.String(url.QueryEscape(d.Get("source").(string))), Key: aws.String(sdkv1CompatibleCleanKey(d.Get("key").(string))), } diff --git a/internal/service/s3/object_copy_test.go b/internal/service/s3/object_copy_test.go index c5f6a225fe2..fd3f0469937 100644 --- a/internal/service/s3/object_copy_test.go +++ b/internal/service/s3/object_copy_test.go @@ -511,13 +511,16 @@ func TestAccS3ObjectCopy_directoryBucket(t *testing.T) { func testAccCheckObjectCopyDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) - for _, rs := range s.RootModule().Resources { if rs.Type != "aws_s3_object_copy" { continue } + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + if tfs3.IsDirectoryBucket(rs.Primary.Attributes["bucket"]) { + conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) + } + _, err := tfs3.FindObjectByBucketAndKey(ctx, conn, rs.Primary.Attributes["bucket"], tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"]), rs.Primary.Attributes["etag"], "") if tfresource.NotFound(err) { @@ -543,6 +546,9 @@ func testAccCheckObjectCopyExists(ctx context.Context, n string) resource.TestCh } conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + if tfs3.IsDirectoryBucket(rs.Primary.Attributes["bucket"]) { + conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) + } _, err := tfs3.FindObjectByBucketAndKey(ctx, conn, rs.Primary.Attributes["bucket"], tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"]), 
rs.Primary.Attributes["etag"], "") From c12d9d7e1c2d40b18982b5edd04b5618aa5f231d Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Dec 2023 16:41:51 -0500 Subject: [PATCH 099/438] d/aws_s3_objects: Use 'S3ExpressClient'. --- .changelog/#####.txt | 4 ++++ internal/service/s3/objects_data_source.go | 3 +++ 2 files changed, 7 insertions(+) diff --git a/.changelog/#####.txt b/.changelog/#####.txt index 85a3443a5d5..37e1e3628e7 100644 --- a/.changelog/#####.txt +++ b/.changelog/#####.txt @@ -20,4 +20,8 @@ data-source/aws_s3_object: Fix `no such host` errors in `us-east-1` for director ```release-note:bug resource/aws_s3_object_copy: Fix `no such host` errors in `us-east-1` for directory buckets +``` + +```release-note:bug +data-source/aws_s3_objects: Fix `no such host` errors in `us-east-1` for directory buckets ``` \ No newline at end of file diff --git a/internal/service/s3/objects_data_source.go b/internal/service/s3/objects_data_source.go index 785bad66955..ba3722362e1 100644 --- a/internal/service/s3/objects_data_source.go +++ b/internal/service/s3/objects_data_source.go @@ -87,6 +87,9 @@ func dataSourceObjectsRead(ctx context.Context, d *schema.ResourceData, meta int conn := meta.(*conns.AWSClient).S3Client(ctx) bucket := d.Get("bucket").(string) + if isDirectoryBucket(bucket) { + conn = meta.(*conns.AWSClient).S3ExpressClient(ctx) + } input := &s3.ListObjectsV2Input{ Bucket: aws.String(bucket), } From 3533f23c85c1b075102be231eeb79e2ffe846339 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Dec 2023 16:44:39 -0500 Subject: [PATCH 100/438] Tweak conn for directory bucket tests. 
--- internal/service/s3/bucket_policy_test.go | 3 +-- internal/service/s3/object_test.go | 8 ++++++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/internal/service/s3/bucket_policy_test.go b/internal/service/s3/bucket_policy_test.go index 49e403b35b4..f31211381cf 100644 --- a/internal/service/s3/bucket_policy_test.go +++ b/internal/service/s3/bucket_policy_test.go @@ -453,13 +453,12 @@ func TestAccS3BucketPolicy_directoryBucket(t *testing.T) { func testAccCheckBucketPolicyDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) - for _, rs := range s.RootModule().Resources { if rs.Type != "aws_s3_bucket_policy" { continue } + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) if tfs3.IsDirectoryBucket(rs.Primary.ID) { conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) } diff --git a/internal/service/s3/object_test.go b/internal/service/s3/object_test.go index db0cfbcfcb5..28cdbe57514 100644 --- a/internal/service/s3/object_test.go +++ b/internal/service/s3/object_test.go @@ -1845,13 +1845,12 @@ func testAccCheckObjectVersionIDEquals(first, second *s3.GetObjectOutput) resour func testAccCheckObjectDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) - for _, rs := range s.RootModule().Resources { if rs.Type != "aws_s3_object" { continue } + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) if tfs3.IsDirectoryBucket(rs.Primary.Attributes["bucket"]) { conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) } @@ -1922,6 +1921,7 @@ func testAccCheckObjectBody(obj *s3.GetObjectOutput, want string) resource.TestC func testAccCheckObjectACL(ctx context.Context, n string, want []string) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] + 
conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) if tfs3.IsDirectoryBucket(rs.Primary.Attributes["bucket"]) { conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) @@ -1953,6 +1953,7 @@ func testAccCheckObjectACL(ctx context.Context, n string, want []string) resourc func testAccCheckObjectStorageClass(ctx context.Context, n, want string) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) if tfs3.IsDirectoryBucket(rs.Primary.Attributes["bucket"]) { conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) @@ -1982,6 +1983,7 @@ func testAccCheckObjectStorageClass(ctx context.Context, n, want string) resourc func testAccCheckObjectSSE(ctx context.Context, n, want string) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) if tfs3.IsDirectoryBucket(rs.Primary.Attributes["bucket"]) { conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) @@ -2020,6 +2022,7 @@ func testAccObjectCreateTempFile(t *testing.T, data string) string { func testAccCheckObjectUpdateTags(ctx context.Context, n string, oldTags, newTags map[string]string) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) if tfs3.IsDirectoryBucket(rs.Primary.Attributes["bucket"]) { conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) @@ -2032,6 +2035,7 @@ func testAccCheckObjectUpdateTags(ctx context.Context, n string, oldTags, newTag func testAccCheckObjectCheckTags(ctx context.Context, n string, expectedTags map[string]string) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) if 
tfs3.IsDirectoryBucket(rs.Primary.Attributes["bucket"]) { conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) From c00e6746f5f6fb16d0ba0526ca73ad8dc274bdde Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Dec 2023 16:47:17 -0500 Subject: [PATCH 101/438] Correct CHANGELOG entry file name. --- .changelog/{#####.txt => 34893.txt} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .changelog/{#####.txt => 34893.txt} (100%) diff --git a/.changelog/#####.txt b/.changelog/34893.txt similarity index 100% rename from .changelog/#####.txt rename to .changelog/34893.txt From b09aa458873365dcf2e4ca07c1dc237291d105d1 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Dec 2023 16:49:19 -0500 Subject: [PATCH 102/438] Tweak documentation for 's3_us_east_1_regional_endpoint'. --- website/docs/index.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index 91c25eb8f71..9dc29cb448a 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -348,7 +348,7 @@ In addition to [generic `provider` arguments](https://www.terraform.io/docs/conf Specific to the Amazon S3 service. * `s3_us_east_1_regional_endpoint` - (Optional) Specifies whether S3 API calls in the `us-east-1` Region use the legacy global endpoint or a regional endpoint. Valid values are `legacy` or `regional`. - If omitted, the default behavior is to use the global endpoint in the `us-east-1` Region. + If omitted, the default behavior in the `us-east-1` Region is to use the global endpoint for general purpose buckets and the regional endpoint for directory buckets. Can also be configured using the `AWS_S3_US_EAST_1_REGIONAL_ENDPOINT` environment variable or the `s3_us_east_1_regional_endpoint` shared config file parameter. Specific to the Amazon S3 service. * `secret_key` - (Optional) AWS secret key. 
Can also be set with the `AWS_SECRET_ACCESS_KEY` environment variable, or via a shared configuration and credentials files if `profile` is used. See also `access_key`. From e34a41b360f79f8def18acdceb89a2d3b0aac890 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 12 Dec 2023 17:45:08 -0500 Subject: [PATCH 103/438] s3: Use 'S3ExpressClient' in sweeper. --- internal/service/s3/sweep.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/internal/service/s3/sweep.go b/internal/service/s3/sweep.go index 2f88461742c..aacbaa0cacd 100644 --- a/internal/service/s3/sweep.go +++ b/internal/service/s3/sweep.go @@ -96,7 +96,8 @@ func sweepObjects(region string) error { } // Directory buckets. - pages := s3.NewListDirectoryBucketsPaginator(conn, &s3.ListDirectoryBucketsInput{}) + s3ExpressConn := client.S3ExpressClient(ctx) + pages := s3.NewListDirectoryBucketsPaginator(s3ExpressConn, &s3.ListDirectoryBucketsInput{}) for pages.HasMorePages() { page, err := pages.NextPage(ctx) @@ -115,7 +116,7 @@ func sweepObjects(region string) error { } sweepables = append(sweepables, directoryBucketObjectSweeper{ - conn: conn, + conn: s3ExpressConn, bucket: aws.ToString(v.Name), }) } @@ -281,7 +282,7 @@ func sweepDirectoryBuckets(region string) error { if err != nil { return fmt.Errorf("getting client: %s", err) } - conn := client.S3Client(ctx) + conn := client.S3ExpressClient(ctx) input := &s3.ListDirectoryBucketsInput{} sweepResources := make([]sweep.Sweepable, 0) From e549c9af96635e15e4437883f6094d4b08139622 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 13 Dec 2023 06:16:24 +0000 Subject: [PATCH 104/438] Bump github.com/aws/aws-sdk-go in /.ci/providerlint Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.49.0 to 1.49.1. 
- [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.49.0...v1.49.1) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .ci/providerlint/go.mod | 2 +- .ci/providerlint/go.sum | 4 ++-- .../github.com/aws/aws-sdk-go/aws/endpoints/defaults.go | 3 +++ .ci/providerlint/vendor/modules.txt | 2 +- 4 files changed, 7 insertions(+), 4 deletions(-) diff --git a/.ci/providerlint/go.mod b/.ci/providerlint/go.mod index 5d6f9703b60..a4d456a3715 100644 --- a/.ci/providerlint/go.mod +++ b/.ci/providerlint/go.mod @@ -3,7 +3,7 @@ module github.com/hashicorp/terraform-provider-aws/ci/providerlint go 1.20 require ( - github.com/aws/aws-sdk-go v1.49.0 + github.com/aws/aws-sdk-go v1.49.1 github.com/bflad/tfproviderlint v0.29.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0 golang.org/x/tools v0.13.0 diff --git a/.ci/providerlint/go.sum b/.ci/providerlint/go.sum index c979c683886..7375ababe78 100644 --- a/.ci/providerlint/go.sum +++ b/.ci/providerlint/go.sum @@ -8,8 +8,8 @@ github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= -github.com/aws/aws-sdk-go v1.49.0 h1:g9BkW1fo9GqKfwg2+zCD+TW/D36Ux+vtfJ8guF4AYmY= -github.com/aws/aws-sdk-go v1.49.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.49.1 h1:Dsamcd8d/nNb3A+bZ0ucfGl0vGZsW5wlRW0vhoYGoeQ= +github.com/aws/aws-sdk-go v1.49.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/bflad/gopaniccheck v0.1.0 h1:tJftp+bv42ouERmUMWLoUn/5bi/iQZjHPznM00cP/bU= 
github.com/bflad/gopaniccheck v0.1.0/go.mod h1:ZCj2vSr7EqVeDaqVsWN4n2MwdROx1YL+LFo47TSWtsA= github.com/bflad/tfproviderlint v0.29.0 h1:zxKYAAM6IZ4ace1a3LX+uzMRIMP8L+iOtEc+FP2Yoow= diff --git a/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index cdf456abe1b..b3d8f8c2c94 100644 --- a/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -5028,6 +5028,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, diff --git a/.ci/providerlint/vendor/modules.txt b/.ci/providerlint/vendor/modules.txt index 071732030d2..d681c129326 100644 --- a/.ci/providerlint/vendor/modules.txt +++ b/.ci/providerlint/vendor/modules.txt @@ -24,7 +24,7 @@ github.com/agext/levenshtein # github.com/apparentlymart/go-textseg/v15 v15.0.0 ## explicit; go 1.16 github.com/apparentlymart/go-textseg/v15/textseg -# github.com/aws/aws-sdk-go v1.49.0 +# github.com/aws/aws-sdk-go v1.49.1 ## explicit; go 1.19 github.com/aws/aws-sdk-go/aws/awserr github.com/aws/aws-sdk-go/aws/endpoints From 39d971049e1a7b500b284af947f23290e3e8d129 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 13 Dec 2023 06:59:54 +0000 Subject: [PATCH 105/438] Bump the aws-sdk-go group with 2 updates Bumps the aws-sdk-go group with 2 updates: [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) and [github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs](https://github.com/aws/aws-sdk-go-v2). 
Updates `github.com/aws/aws-sdk-go` from 1.49.0 to 1.49.1 - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.49.0...v1.49.1) Updates `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs` from 1.29.5 to 1.30.0 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/service/s3/v1.30.0/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.29.5...service/s3/v1.30.0) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs dependency-type: direct:production update-type: version-update:semver-minor dependency-group: aws-sdk-go ... Signed-off-by: dependabot[bot] --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 404f6b43c14..2440963bc66 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.20 require ( github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c github.com/YakDriver/regexache v0.23.0 - github.com/aws/aws-sdk-go v1.49.0 + github.com/aws/aws-sdk-go v1.49.1 github.com/aws/aws-sdk-go-v2 v1.24.0 github.com/aws/aws-sdk-go-v2/config v1.26.1 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 @@ -24,7 +24,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.12.5 github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.8.5 github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.15.5 - github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.29.5 + github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.30.0 github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.10.5 github.com/aws/aws-sdk-go-v2/service/codedeploy v1.22.1 github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.18.5 diff --git a/go.sum b/go.sum index 
b7fc7d2dddd..4ea2c29f683 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,8 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmms github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go v1.49.0 h1:g9BkW1fo9GqKfwg2+zCD+TW/D36Ux+vtfJ8guF4AYmY= -github.com/aws/aws-sdk-go v1.49.0/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.49.1 h1:Dsamcd8d/nNb3A+bZ0ucfGl0vGZsW5wlRW0vhoYGoeQ= +github.com/aws/aws-sdk-go v1.49.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v1.24.0 h1:890+mqQ+hTpNuw0gGP6/4akolQkSToDJgHfQE7AwGuk= github.com/aws/aws-sdk-go-v2 v1.24.0/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs= @@ -72,8 +72,8 @@ github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.8.5 h1:HH9fmVqF71UES7ES8+vAnJ github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.8.5/go.mod h1:6nxVpS0JBdSwXDm+vo+Hwz/CJn03vu6HexNB7bQSv3Y= github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.15.5 h1:9aS9PZ/cnTVjWDIOVqgxKd+cRxP9W1MYrQhXwh/vBec= github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.15.5/go.mod h1:21V6X5ZV37Oel5VQZRZtxMj6jeqQr6sMbhuWu9oTaH0= -github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.29.5 h1:0yGqcpfnCyG4La+uIi3ziT/VzjxP4C7pGs39RxcGUEM= -github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.29.5/go.mod h1:RDU4fPO0Yb1nRUjQouqJj/bF+Ppz2XdXpWsWvxDXFS4= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.30.0 h1:CMZz/TJgt+GMKRxjuedxhMFs45GPhyst/a/7Q3DuAg4= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.30.0/go.mod h1:4Oeb7n2r/ApBIHphQkprve380p/RpPWBotumd44EDGg= 
github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.10.5 h1:52hjOAJdIm0P2MWM14J7aLKtcT8SItEtdluW+5LbWSo= github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.10.5/go.mod h1:8GW1bxNLHWPRwtpJKNn8z0h2N6nKgoAsN4CjeAMIrLA= github.com/aws/aws-sdk-go-v2/service/codedeploy v1.22.1 h1:cyRoT4yeLGEQk8ad4Se82INAA8Xcu6xr1grQ684GYnQ= From 8bb9d1eab9fc4c64007c22f5320f6915d6bd2f52 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 13 Dec 2023 07:00:20 +0000 Subject: [PATCH 106/438] Bump the aws-sdk-go-base group with 2 updates Bumps the aws-sdk-go-base group with 2 updates: [github.com/hashicorp/aws-sdk-go-base/v2](https://github.com/hashicorp/aws-sdk-go-base) and [github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2](https://github.com/hashicorp/aws-sdk-go-base). Updates `github.com/hashicorp/aws-sdk-go-base/v2` from 2.0.0-beta.44 to 2.0.0-beta.45 - [Release notes](https://github.com/hashicorp/aws-sdk-go-base/releases) - [Changelog](https://github.com/hashicorp/aws-sdk-go-base/blob/main/CHANGELOG.md) - [Commits](https://github.com/hashicorp/aws-sdk-go-base/compare/v2.0.0-beta.44...v2.0.0-beta.45) Updates `github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2` from 2.0.0-beta.45 to 2.0.0-beta.46 - [Release notes](https://github.com/hashicorp/aws-sdk-go-base/releases) - [Changelog](https://github.com/hashicorp/aws-sdk-go-base/blob/main/CHANGELOG.md) - [Commits](https://github.com/hashicorp/aws-sdk-go-base/compare/v2.0.0-beta.45...v2/awsv1shim/v2.0.0-beta.46) --- updated-dependencies: - dependency-name: github.com/hashicorp/aws-sdk-go-base/v2 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go-base - dependency-name: github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go-base ... 
Signed-off-by: dependabot[bot] --- go.mod | 8 ++++---- go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/go.mod b/go.mod index 404f6b43c14..6d052701d16 100644 --- a/go.mod +++ b/go.mod @@ -104,8 +104,8 @@ require ( github.com/gertd/go-pluralize v0.2.1 github.com/google/go-cmp v0.6.0 github.com/hashicorp/aws-cloudformation-resource-schema-sdk-go v0.21.0 - github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.44 - github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.45 + github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.45 + github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.46 github.com/hashicorp/awspolicyequivalence v1.6.0 github.com/hashicorp/go-cleanhttp v0.5.2 github.com/hashicorp/go-cty v1.4.1-0.20200723130312-85980079f637 @@ -152,8 +152,8 @@ require ( github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 // indirect github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9 // indirect - github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.2 // indirect - github.com/aws/aws-sdk-go-v2/service/iam v1.28.1 // indirect + github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.6 // indirect + github.com/aws/aws-sdk-go-v2/service/iam v1.28.5 // indirect github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.9 // indirect github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.8.10 // indirect diff --git a/go.sum b/go.sum index b7fc7d2dddd..bea86c79408 100644 --- a/go.sum +++ b/go.sum @@ -98,8 +98,8 @@ github.com/aws/aws-sdk-go-v2/service/directoryservice v1.22.5 h1:i/7aXIrjTdVZtch github.com/aws/aws-sdk-go-v2/service/directoryservice v1.22.5/go.mod h1:KTFSRANgKK34D1LNNtOkPLWVgjhbx172XAQ1cDkP+08= github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.6.5 h1:ikZu83oYYnSdtc73OP1HCBXuSxQ9AXDEebHhgnTpGDA= github.com/aws/aws-sdk-go-v2/service/docdbelastic 
v1.6.5/go.mod h1:XEY63kzpXT3wMrE6yBqWCY+K1bq5Fixq32eCZYFhwpA= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.2 h1:IPMh5Selz3UKr1rY8FaNTv4Dx/Tl/G/yGpnZlhyuk+A= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.2/go.mod h1:Mj372IvfZ9ftME7Kdo74stz3KAjMA+WC7Fzzry9uCDI= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.6 h1:kSdpnPOZL9NG5QHoKL5rTsdY+J+77hr+vqVMsPeyNe0= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.6/go.mod h1:o7TD9sjdgrl8l/g2a2IkYjuhxjPy9DMP2sWo7piaRBQ= github.com/aws/aws-sdk-go-v2/service/ec2 v1.141.0 h1:cP43vFYAQyREOp972C+6d4+dzpxo3HolNvWfeBvr2Yg= github.com/aws/aws-sdk-go-v2/service/ec2 v1.141.0/go.mod h1:qjhtI9zjpUHRc6khtrIM9fb48+ii6+UikL3/b+MKYn0= github.com/aws/aws-sdk-go-v2/service/ecr v1.24.5 h1:wLPDAUFT50NEXGXpywRU3AA74pg35RJjWol/68ruvQQ= @@ -120,8 +120,8 @@ github.com/aws/aws-sdk-go-v2/service/glacier v1.19.5 h1:uPp9xWrEh9ui0WN1G3G7Rhgr github.com/aws/aws-sdk-go-v2/service/glacier v1.19.5/go.mod h1:U/zNi1isGbxK7fobrqBYLUS+7BNqMtxu49bR27ZcPYQ= github.com/aws/aws-sdk-go-v2/service/healthlake v1.20.5 h1:lm7KEWrkI54kso0o3qwODbJDTpEvdZyj/NoKOIheKOg= github.com/aws/aws-sdk-go-v2/service/healthlake v1.20.5/go.mod h1:5IxzIDau0tsh8NRR6wcRp8u1Xn9QY9CcD9e34lpFqEQ= -github.com/aws/aws-sdk-go-v2/service/iam v1.28.1 h1:8hPt8pYpl5SVx5dpdbqyplZcbEVsORngFT9oyz1kg20= -github.com/aws/aws-sdk-go-v2/service/iam v1.28.1/go.mod h1:mDBl4I2h0uNgx89a+Cer1TA8PN/nMO+maQYUA6nw8c4= +github.com/aws/aws-sdk-go-v2/service/iam v1.28.5 h1:Ts2eDDuMLrrmd0ARlg5zSoBQUvhdthgiNnPdiykTJs0= +github.com/aws/aws-sdk-go-v2/service/iam v1.28.5/go.mod h1:kKI0gdVsf+Ev9knh/3lBJbchtX5LLNH25lAzx3KDj3Q= github.com/aws/aws-sdk-go-v2/service/identitystore v1.21.5 h1:x93yL/0ey4Y/HEBSsqcLNQDDeIVRLOdziLMg3+YM/F8= github.com/aws/aws-sdk-go-v2/service/identitystore v1.21.5/go.mod h1:vs4IYQdGHOLq6DsPfSuoADmRzr/AeWIk8m50XBnwN/o= github.com/aws/aws-sdk-go-v2/service/inspector2 v1.20.5 h1:PKwE3fh67K7Kig3LlbuipQOrNSraQuEpFl09VOpaNvc= @@ -299,10 +299,10 @@ github.com/google/uuid v1.3.1 
h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hashicorp/aws-cloudformation-resource-schema-sdk-go v0.21.0 h1:IUypt/TbXiJBkBbE3926CgnjD8IltAitdn7Yive61DY= github.com/hashicorp/aws-cloudformation-resource-schema-sdk-go v0.21.0/go.mod h1:cdTE6F2pCKQobug+RqRaQp7Kz9hIEqiSvpPmb6E5G1w= -github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.44 h1:ejYWOQAQ13u/Cavkv3IT+fSTrHgoGatteS+hBRECFP8= -github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.44/go.mod h1:i9S6EzWMRpdNwqeWq81O/uN2+/56HAplgxlh3iuqmRY= -github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.45 h1:HARgLlRQNkuLo4og7aJ2nCN80StPwq2JHX4DkjjD+Wc= -github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.45/go.mod h1:fJHQmbiX3K1aSBJuW0gS9TDmqXDexWoXWyLgAN0wMfk= +github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.45 h1:esKaa1l2oJiARVIa20DPxgID9V7FyFfert7X1FWg1HU= +github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.45/go.mod h1:roO9Btzl+fvOFhvDN7CuPf6n60K6Yh0ykzwxhwbMK90= +github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.46 h1:fqKzv4gP8AQe89FMDU2HgmzFbjYZ9dlKMnyXdnEFIig= +github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.46/go.mod h1:j91OF53uFDky+CuBApWHmJ3PqLAbwEsWXckzFwc9eeY= github.com/hashicorp/awspolicyequivalence v1.6.0 h1:7aadmkalbc5ewStC6g3rljx1iNvP4QyAhg2KsHx8bU8= github.com/hashicorp/awspolicyequivalence v1.6.0/go.mod h1:9IOaIHx+a7C0NfUNk1A93M7kHd5rJ19aoUx37LZGC14= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= From 0666f80038b59d6adfe56c69a921e76d60beef5a Mon Sep 17 00:00:00 2001 From: breathingdust Date: Wed, 13 Dec 2023 09:04:10 +0000 Subject: [PATCH 107/438] docs: update resource counts --- website/docs/index.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index b2cab4d50eb..8a7bd3b0508 100644 --- 
a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -11,7 +11,7 @@ Use the Amazon Web Services (AWS) provider to interact with the many resources supported by AWS. You must configure the provider with the proper credentials before you can use it. -Use the navigation to the left to read about the available resources. There are currently 1291 resources and 528 data sources available in the provider. +Use the navigation to the left to read about the available resources. There are currently 1300 resources and 533 data sources available in the provider. To learn the basics of Terraform using this provider, follow the hands-on [get started tutorials](https://learn.hashicorp.com/tutorials/terraform/infrastructure-as-code?in=terraform/aws-get-started&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS). Interact with AWS services, From d66dd1ed9208fa8bff756f820b188b1ca1fefdd5 Mon Sep 17 00:00:00 2001 From: Jeppe Fihl-Pearson Date: Wed, 13 Dec 2023 10:31:23 +0000 Subject: [PATCH 108/438] Mention "cloudfront-js-2.0" now is an available runtime for CloudFront funcs This seems to have been a recent addition that hasn't been announced anywhere. It was mentioned in https://aws.amazon.com/blogs/aws/introducing-amazon-cloudfront-keyvaluestore-a-low-latency-datastore-for-cloudfront-functions/ but otherwise it just seems to have appeared in the documentation: https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/functions-javascript-runtime-20.html https://docs.aws.amazon.com/cloudfront/latest/APIReference/API_FunctionConfig.html I have updated the runtime version shown in the example as I assume most people would prefer to use the more recent runtime version. 
--- website/docs/r/cloudfront_function.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/r/cloudfront_function.html.markdown b/website/docs/r/cloudfront_function.html.markdown index aa6f1b1588d..df52b5627c6 100644 --- a/website/docs/r/cloudfront_function.html.markdown +++ b/website/docs/r/cloudfront_function.html.markdown @@ -21,7 +21,7 @@ See [CloudFront Functions](https://docs.aws.amazon.com/AmazonCloudFront/latest/D ```terraform resource "aws_cloudfront_function" "test" { name = "test" - runtime = "cloudfront-js-1.0" + runtime = "cloudfront-js-2.0" comment = "my function" publish = true code = file("${path.module}/function.js") @@ -34,7 +34,7 @@ The following arguments are required: * `name` - (Required) Unique name for your CloudFront Function. * `code` - (Required) Source code of the function -* `runtime` - (Required) Identifier of the function's runtime. Currently only `cloudfront-js-1.0` is valid. +* `runtime` - (Required) Identifier of the function's runtime. Valid values are `cloudfront-js-1.0` and `cloudfront-js-2.0`. The following arguments are optional: From 831611ee6d1139042b288770ce654b91f6951b08 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 05:53:59 -0500 Subject: [PATCH 109/438] Consolidate CHANGELOG entries. --- .changelog/34893.txt | 26 +------------------------- 1 file changed, 1 insertion(+), 25 deletions(-) diff --git a/.changelog/34893.txt b/.changelog/34893.txt index 37e1e3628e7..45f357f0418 100644 --- a/.changelog/34893.txt +++ b/.changelog/34893.txt @@ -1,27 +1,3 @@ ```release-note:bug -resource/aws_s3_directory_bucket: Fix `no such host` errors in `us-east-1` +provider: Always use the S3 regional endpoint in us-east-1 for S3 directory bucket operations. 
This fixes `no such host` errors ``` - -```release-note:bug -data-source/aws_s3_directory_buckets: Fix `no such host` errors in `us-east-1` -``` - -```release-note:bug -resource/aws_s3_bucket_policy: Fix `no such host` errors in `us-east-1` for directory buckets -``` - -```release-note:bug -resource/aws_s3_object: Fix `no such host` errors in `us-east-1` for directory buckets -``` - -```release-note:bug -data-source/aws_s3_object: Fix `no such host` errors in `us-east-1` for directory buckets -``` - -```release-note:bug -resource/aws_s3_object_copy: Fix `no such host` errors in `us-east-1` for directory buckets -``` - -```release-note:bug -data-source/aws_s3_objects: Fix `no such host` errors in `us-east-1` for directory buckets -``` \ No newline at end of file From 58cf551d96f85df2ba75a5059d394f36e3f18ab0 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 08:43:27 -0500 Subject: [PATCH 110/438] Add CHANGELOG entry. --- .changelog/34859.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/34859.txt diff --git a/.changelog/34859.txt b/.changelog/34859.txt new file mode 100644 index 00000000000..13ca251c2ee --- /dev/null +++ b/.changelog/34859.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_vpc: Increase IPAM pool allocation deletion timeout from 20 minutes to 35 minutes +``` \ No newline at end of file From 8eff7cef97e81da90d0dffbfc62ef493ab402cbf Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 08:49:36 -0500 Subject: [PATCH 111/438] Update 34774.txt --- .changelog/34774.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changelog/34774.txt b/.changelog/34774.txt index 6bf9ca533d3..caf13a8b326 100644 --- a/.changelog/34774.txt +++ b/.changelog/34774.txt @@ -1,3 +1,3 @@ ```release-note:bug -resource/appmesh_virtual_node: Remove limit of 50 backends per virtual node +resource/aws_appmesh_virtual_node: Remove limit of 50 `backend`s per virtual node ``` From 
65530fc8cf2f8710b1bcecb82d63e444d70d5add Mon Sep 17 00:00:00 2001 From: Fredrik Ekre Date: Thu, 7 Dec 2023 13:54:00 +0100 Subject: [PATCH 112/438] lightsail: link to upstream documentation for zones, bundles, blueprints This patch replaces the hardcoded lists of availability zones and bundle IDs with references to AWS CLI commands that return up-to-date data. --- .../docs/r/lightsail_instance.html.markdown | 67 +++---------------- 1 file changed, 10 insertions(+), 57 deletions(-) diff --git a/website/docs/r/lightsail_instance.html.markdown b/website/docs/r/lightsail_instance.html.markdown index 88dfcbde8e3..5025ecab873 100644 --- a/website/docs/r/lightsail_instance.html.markdown +++ b/website/docs/r/lightsail_instance.html.markdown @@ -69,11 +69,16 @@ resource "aws_lightsail_instance" "test" { This resource supports the following arguments: -* `name` - (Required) The name of the Lightsail Instance. Names be unique within each AWS Region in your Lightsail account. -* `availability_zone` - (Required) The Availability Zone in which to create your -instance (see list below) -* `blueprint_id` - (Required) The ID for a virtual private server image. A list of available blueprint IDs can be obtained using the AWS CLI command: `aws lightsail get-blueprints` -* `bundle_id` - (Required) The bundle of specification information (see list below) +* `name` - (Required) The name of the Lightsail Instance. Names must be unique within each AWS Region in your Lightsail account. +* `availability_zone` - (Required) The Availability Zone in which to create your instance. A + list of available zones can be obtained using the AWS CLI command: + [`aws lightsail get-regions --include-availability-zones`](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/lightsail/get-regions.html). +* `blueprint_id` - (Required) The ID for a virtual private server image. 
A list of available + blueprint IDs can be obtained using the AWS CLI command: + [`aws lightsail get-blueprints`](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/lightsail/get-blueprints.html). +* `bundle_id` - (Required) The bundle of specification information. A list of available + bundle IDs can be obtained using the AWS CLI command: + [`aws lightsail get-bundles`](https://awscli.amazonaws.com/v2/documentation/api/latest/reference/lightsail/get-bundles.html). * `key_pair_name` - (Optional) The name of your key pair. Created in the Lightsail console (cannot use `aws_key_pair` at this time) * `user_data` - (Optional) Single lined launch script as a string to configure server with additional user data @@ -89,58 +94,6 @@ Defines the add on configuration for the instance. The `add_on` configuration bl * `snapshot_time` - (Required) The daily time when an automatic snapshot will be created. Must be in HH:00 format, and in an hourly increment and specified in Coordinated Universal Time (UTC). The snapshot will be automatically created between the time specified and up to 45 minutes after. * `status` - (Required) The status of the add on. Valid Values: `Enabled`, `Disabled`. 
-## Availability Zones - -Lightsail currently supports the following Availability Zones (e.g., `us-east-1a`): - -- `ap-northeast-1{a,c,d}` -- `ap-northeast-2{a,c}` -- `ap-south-1{a,b}` -- `ap-southeast-1{a,b,c}` -- `ap-southeast-2{a,b,c}` -- `ca-central-1{a,b}` -- `eu-central-1{a,b,c}` -- `eu-west-1{a,b,c}` -- `eu-west-2{a,b,c}` -- `eu-west-3{a,b,c}` -- `us-east-1{a,b,c,d,e,f}` -- `us-east-2{a,b,c}` -- `us-west-2{a,b,c}` - -## Bundles - -Lightsail currently supports the following Bundle IDs (e.g., an instance in `ap-northeast-1` would use `small_2_0`): - -### Prefix - -A Bundle ID starts with one of the below size prefixes: - -- `nano_` -- `micro_` -- `small_` -- `medium_` -- `large_` -- `xlarge_` -- `2xlarge_` - -### Suffix - -A Bundle ID ends with one of the following suffixes depending on Availability Zone: - -- ap-northeast-1: `2_0` -- ap-northeast-2: `2_0` -- ap-south-1: `2_1` -- ap-southeast-1: `2_0` -- ap-southeast-2: `2_2` -- ca-central-1: `2_0` -- eu-central-1: `2_0` -- eu-west-1: `2_0` -- eu-west-2: `2_0` -- eu-west-3: `2_0` -- us-east-1: `2_0` -- us-east-2: `2_0` -- us-west-2: `2_0` - ## Attribute Reference This resource exports the following attributes in addition to the arguments above: From 06563b6e803204df073ae12c0aa8d07226c56be5 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Wed, 13 Dec 2023 14:19:36 +0000 Subject: [PATCH 113/438] Update CHANGELOG.md for #34899 --- CHANGELOG.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 38a1f669498..bb4ebc68010 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,9 +8,11 @@ FEATURES: ENHANCEMENTS: * data-source/aws_cloudwatch_log_group: Add `log_group_class` attribute ([#34812](https://github.com/hashicorp/terraform-provider-aws/issues/34812)) +* data-source/aws_lb: Add `connection_logs` attribute ([#34864](https://github.com/hashicorp/terraform-provider-aws/issues/34864)) * data-source/aws_lb: Add `dns_record_client_routing_policy` attribute 
([#34135](https://github.com/hashicorp/terraform-provider-aws/issues/34135)) * data-source/aws_opensearchserverless_collection: Add `standby_replicas` attribute ([#34677](https://github.com/hashicorp/terraform-provider-aws/issues/34677)) * resource/aws_db_instance: Add support for IBM Db2 databases ([#34834](https://github.com/hashicorp/terraform-provider-aws/issues/34834)) +* resource/aws_lb: Add `connection_logs` configuration block ([#34864](https://github.com/hashicorp/terraform-provider-aws/issues/34864)) * resource/aws_lb: Add plan-time validation that exactly one of either `subnets` or `subnet_mapping` is configured ([#33205](https://github.com/hashicorp/terraform-provider-aws/issues/33205)) * resource/aws_lb: Allow the number of `subnet_mapping`s for Application Load Balancers to be changed without recreating the resource ([#33205](https://github.com/hashicorp/terraform-provider-aws/issues/33205)) * resource/aws_lb: Allow the number of `subnet_mapping`s for Network Load Balancers to be increased without recreating the resource ([#33205](https://github.com/hashicorp/terraform-provider-aws/issues/33205)) @@ -20,6 +22,7 @@ ENHANCEMENTS: BUG FIXES: * data-source/aws_ecr_pull_through_cache_rule: Fix plan time validation for `ecr_repository_prefix` ([#34716](https://github.com/hashicorp/terraform-provider-aws/issues/34716)) +* resource/aws_appmesh_virtual_node: Remove limit of 50 `backend`s per virtual node ([#34774](https://github.com/hashicorp/terraform-provider-aws/issues/34774)) * resource/aws_cloudwatch_log_group: Fix `invalid new value for .skip_destroy: was cty.False, but now null` errors ([#30354](https://github.com/hashicorp/terraform-provider-aws/issues/30354)) * resource/aws_cloudwatch_log_group: Remove default value (`STANDARD`) for `log_group_class` argument and mark as Computed. 
This fixes `InvalidParameterException: Only Standard log class is supported` errors in AWS Regions other than AWS Commercial ([#34812](https://github.com/hashicorp/terraform-provider-aws/issues/34812)) * resource/aws_db_instance: Fix error where Terraform loses track of resource if Blue/Green Deployment is applied outside of Terraform ([#34728](https://github.com/hashicorp/terraform-provider-aws/issues/34728)) @@ -28,6 +31,7 @@ BUG FIXES: * resource/aws_lb: Fix `InvalidConfigurationRequest: Load balancer attribute key 'dns_record.client_routing_policy' is not supported on load balancers with type 'network'` errors on resource Create in AWS GovCloud (US) ([#34135](https://github.com/hashicorp/terraform-provider-aws/issues/34135)) * resource/aws_medialive_channel: Fixed errors related to setting the `failover_condition` argument ([#33410](https://github.com/hashicorp/terraform-provider-aws/issues/33410)) * resource/aws_securitylake_data_lake: Fix `reflect.Set: value of type basetypes.StringValue is not assignable to type types.ARN` panic when importing resources with `nil` ARN fields ([#34820](https://github.com/hashicorp/terraform-provider-aws/issues/34820)) +* resource/aws_vpc: Increase IPAM pool allocation deletion timeout from 20 minutes to 35 minutes ([#34859](https://github.com/hashicorp/terraform-provider-aws/issues/34859)) ## 5.30.0 (December 7, 2023) From 2bb4795331255f8c49edd4b51581c44e4f6a4955 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 09:21:00 -0500 Subject: [PATCH 114/438] d/aws_s3_objects: Fix 'Invalid configuration: region from ARN `us-east-1` does not match client region `aws-global` and UseArnRegion is `false`' when using S3 access point. 
--- internal/service/s3/objects_data_source.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/internal/service/s3/objects_data_source.go b/internal/service/s3/objects_data_source.go index ba3722362e1..357cb9c9856 100644 --- a/internal/service/s3/objects_data_source.go +++ b/internal/service/s3/objects_data_source.go @@ -7,6 +7,7 @@ import ( "context" "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -14,6 +15,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/names" ) const keyRequestPageSize = 1000 @@ -85,11 +87,16 @@ func DataSourceObjects() *schema.Resource { func dataSourceObjectsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).S3Client(ctx) + var optFns []func(*s3.Options) bucket := d.Get("bucket").(string) if isDirectoryBucket(bucket) { conn = meta.(*conns.AWSClient).S3ExpressClient(ctx) } + // Via S3 access point: "Invalid configuration: region from ARN `us-east-1` does not match client region `aws-global` and UseArnRegion is `false`". + if arn.IsARN(bucket) && conn.Options().Region == names.GlobalRegionID { + optFns = append(optFns, func(o *s3.Options) { o.UseARNRegion = true }) + } input := &s3.ListObjectsV2Input{ Bucket: aws.String(bucket), } @@ -133,7 +140,7 @@ func dataSourceObjectsRead(ctx context.Context, d *schema.ResourceData, meta int pages := s3.NewListObjectsV2Paginator(conn, input) pageLoop: for pages.HasMorePages() { - page, err := pages.NextPage(ctx) + page, err := pages.NextPage(ctx, optFns...) 
if err != nil { return sdkdiag.AppendErrorf(diags, "listing S3 Bucket (%s) Objects: %s", bucket, err) From b4eacaeebcb660e3cf283b77c2f4c836a8faafbf Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 10:45:01 -0500 Subject: [PATCH 115/438] Add CHANGELOG entry. --- .changelog/34724.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/34724.txt diff --git a/.changelog/34724.txt b/.changelog/34724.txt new file mode 100644 index 00000000000..2f038716a38 --- /dev/null +++ b/.changelog/34724.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_dms_endpoint: Add `postgres_settings` configuration block +``` \ No newline at end of file From ede566db97680655a8f249de312af35638d09331 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 10:45:42 -0500 Subject: [PATCH 116/438] d/aws_s3_object: Fix 'Invalid configuration: region from ARN `us-east-1` does not match client region `aws-global` and UseArnRegion is `false`' when using S3 access point. --- internal/service/s3/object.go | 4 ++-- internal/service/s3/object_data_source.go | 13 ++++++++++--- internal/service/s3/tags.go | 12 ++++++------ 3 files changed, 18 insertions(+), 11 deletions(-) diff --git a/internal/service/s3/object.go b/internal/service/s3/object.go index 2f584184064..0d5e7e72d37 100644 --- a/internal/service/s3/object.go +++ b/internal/service/s3/object.go @@ -666,8 +666,8 @@ func findObjectByBucketAndKey(ctx context.Context, conn *s3.Client, bucket, key, return findObject(ctx, conn, input) } -func findObject(ctx context.Context, conn *s3.Client, input *s3.HeadObjectInput) (*s3.HeadObjectOutput, error) { - output, err := conn.HeadObject(ctx, input) +func findObject(ctx context.Context, conn *s3.Client, input *s3.HeadObjectInput, optFns ...func(*s3.Options)) (*s3.HeadObjectOutput, error) { + output, err := conn.HeadObject(ctx, input, optFns...) 
if tfawserr.ErrHTTPStatusCodeEquals(err, http.StatusNotFound) { return nil, &retry.NotFoundError{ diff --git a/internal/service/s3/object_data_source.go b/internal/service/s3/object_data_source.go index 2e58ecf97ec..0b227821466 100644 --- a/internal/service/s3/object_data_source.go +++ b/internal/service/s3/object_data_source.go @@ -12,6 +12,7 @@ import ( "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" @@ -22,6 +23,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKDataSource("aws_s3_object") @@ -157,12 +159,17 @@ func DataSourceObject() *schema.Resource { func dataSourceObjectRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).S3Client(ctx) + var optFns []func(*s3.Options) ignoreTagsConfig := meta.(*conns.AWSClient).IgnoreTagsConfig bucket := d.Get("bucket").(string) if isDirectoryBucket(bucket) { conn = meta.(*conns.AWSClient).S3ExpressClient(ctx) } + // Via S3 access point: "Invalid configuration: region from ARN `us-east-1` does not match client region `aws-global` and UseArnRegion is `false`". 
+ if arn.IsARN(bucket) && conn.Options().Region == names.GlobalRegionID { + optFns = append(optFns, func(o *s3.Options) { o.UseARNRegion = true }) + } key := sdkv1CompatibleCleanKey(d.Get("key").(string)) input := &s3.HeadObjectInput{ Bucket: aws.String(bucket), @@ -178,7 +185,7 @@ func dataSourceObjectRead(ctx context.Context, d *schema.ResourceData, meta inte input.VersionId = aws.String(v.(string)) } - output, err := findObject(ctx, conn, input) + output, err := findObject(ctx, conn, input, optFns...) if err != nil { return sdkdiag.AppendErrorf(diags, "reading S3 Bucket (%s) Object (%s): %s", bucket, key, err) @@ -234,7 +241,7 @@ func dataSourceObjectRead(ctx context.Context, d *schema.ResourceData, meta inte d.Set("website_redirect_location", output.WebsiteRedirectLocation) if isContentTypeAllowed(output.ContentType) { - downloader := manager.NewDownloader(conn) + downloader := manager.NewDownloader(conn, manager.WithDownloaderClientOptions(optFns...)) buf := manager.NewWriteAtBuffer(make([]byte, 0)) input := &s3.GetObjectInput{ Bucket: aws.String(bucket), @@ -254,7 +261,7 @@ func dataSourceObjectRead(ctx context.Context, d *schema.ResourceData, meta inte d.Set("body", string(buf.Bytes())) } - if tags, err := ObjectListTags(ctx, conn, bucket, key); err == nil { + if tags, err := ObjectListTags(ctx, conn, bucket, key, optFns...); err == nil { if err := d.Set("tags", tags.IgnoreAWS().IgnoreConfig(ignoreTagsConfig).Map()); err != nil { return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) } diff --git a/internal/service/s3/tags.go b/internal/service/s3/tags.go index 690521b27fd..8707d4a3ecf 100644 --- a/internal/service/s3/tags.go +++ b/internal/service/s3/tags.go @@ -90,13 +90,13 @@ func BucketUpdateTags(ctx context.Context, conn s3iface_sdkv1.S3API, identifier } // ObjectListTags lists S3 object tags. 
-func ObjectListTags(ctx context.Context, conn *s3_sdkv2.Client, bucket, key string) (tftags.KeyValueTags, error) { +func ObjectListTags(ctx context.Context, conn *s3_sdkv2.Client, bucket, key string, optFns ...func(*s3_sdkv2.Options)) (tftags.KeyValueTags, error) { input := &s3_sdkv2.GetObjectTaggingInput{ Bucket: aws_sdkv2.String(bucket), Key: aws_sdkv2.String(key), } - output, err := conn.GetObjectTagging(ctx, input) + output, err := conn.GetObjectTagging(ctx, input, optFns...) if tfawserr_sdkv2.ErrCodeEquals(err, errCodeNoSuchTagSet, errCodeNoSuchTagSetError) { return tftags.New(ctx, nil), nil @@ -110,12 +110,12 @@ func ObjectListTags(ctx context.Context, conn *s3_sdkv2.Client, bucket, key stri } // ObjectUpdateTags updates S3 object tags. -func ObjectUpdateTags(ctx context.Context, conn *s3_sdkv2.Client, bucket, key string, oldTagsMap, newTagsMap any) error { +func ObjectUpdateTags(ctx context.Context, conn *s3_sdkv2.Client, bucket, key string, oldTagsMap, newTagsMap any, optFns ...func(*s3_sdkv2.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) // We need to also consider any existing ignored tags. - allTags, err := ObjectListTags(ctx, conn, bucket, key) + allTags, err := ObjectListTags(ctx, conn, bucket, key, optFns...) if err != nil { return fmt.Errorf("listing resource tags (%s/%s): %w", bucket, key, err) @@ -132,7 +132,7 @@ func ObjectUpdateTags(ctx context.Context, conn *s3_sdkv2.Client, bucket, key st }, } - _, err := conn.PutObjectTagging(ctx, input) + _, err := conn.PutObjectTagging(ctx, input, optFns...) if err != nil { return fmt.Errorf("setting resource tags (%s/%s): %w", bucket, key, err) @@ -143,7 +143,7 @@ func ObjectUpdateTags(ctx context.Context, conn *s3_sdkv2.Client, bucket, key st Key: aws_sdkv2.String(key), } - _, err := conn.DeleteObjectTagging(ctx, input) + _, err := conn.DeleteObjectTagging(ctx, input, optFns...) 
if err != nil { return fmt.Errorf("deleting resource tags (%s/%s): %w", bucket, key, err) From 4d6f6e394d0ebac90cf013be92807fcdf56899b4 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 10:47:53 -0500 Subject: [PATCH 117/438] Fix terrafmt error. --- internal/service/dms/endpoint_test.go | 29 ++++++++++++++------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/internal/service/dms/endpoint_test.go b/internal/service/dms/endpoint_test.go index aa8d130a80b..93910c75b3e 100644 --- a/internal/service/dms/endpoint_test.go +++ b/internal/service/dms/endpoint_test.go @@ -3876,21 +3876,22 @@ resource "aws_dms_endpoint" "test" { database_name = "tftest" ssl_mode = "require" extra_connection_attributes = "" + postgres_settings { - after_connect_script = "SET search_path TO pg_catalog,public;" - capture_ddls = true - ddl_artifacts_schema = true - execute_timeout = 100 - fail_tasks_on_lob_truncation = false - heartbeat_enable = true - heartbeat_frequency = 5 - heartbeat_schema = "test" - map_boolean_as_boolean = true - map_jsonb_as_clob = true - map_long_varchar_as = "wstring" - max_file_size = 1024 - plugin_name = "pglogical" - slot_name = "test" + after_connect_script = "SET search_path TO pg_catalog,public;" + capture_ddls = true + ddl_artifacts_schema = true + execute_timeout = 100 + fail_tasks_on_lob_truncation = false + heartbeat_enable = true + heartbeat_frequency = 5 + heartbeat_schema = "test" + map_boolean_as_boolean = true + map_jsonb_as_clob = true + map_long_varchar_as = "wstring" + max_file_size = 1024 + plugin_name = "pglogical" + slot_name = "test" } } `, rName) From 2a7237b764291b58279813b7b1db741445dd5ce9 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 10:52:51 -0500 Subject: [PATCH 118/438] r/aws_dms_endpoint: Cosmetics. 
--- internal/service/dms/endpoint.go | 63 +++++++++++++++++--------------- 1 file changed, 34 insertions(+), 29 deletions(-) diff --git a/internal/service/dms/endpoint.go b/internal/service/dms/endpoint.go index 47d38d9f560..aaf6b2e6e47 100644 --- a/internal/service/dms/endpoint.go +++ b/internal/service/dms/endpoint.go @@ -826,26 +826,27 @@ func resourceEndpointCreate(ctx context.Context, d *schema.ResourceData, meta in expandTopLevelConnectionInfo(d, input) } case engineNameAuroraPostgresql, engineNamePostgres: - postgres_settings := &dms.PostgreSQLSettings{} + settings := &dms.PostgreSQLSettings{} if _, ok := d.GetOk("postgres_settings"); ok { - postgres_settings = expandPostgreSQLSettings(d.Get("postgres_settings").([]interface{})[0].(map[string]interface{})) + settings = expandPostgreSQLSettings(d.Get("postgres_settings").([]interface{})[0].(map[string]interface{})) } if _, ok := d.GetOk("secrets_manager_arn"); ok { - postgres_settings.SecretsManagerAccessRoleArn = aws.String(d.Get("secrets_manager_access_role_arn").(string)) - postgres_settings.SecretsManagerSecretId = aws.String(d.Get("secrets_manager_arn").(string)) - postgres_settings.DatabaseName = aws.String(d.Get("database_name").(string)) + settings.SecretsManagerAccessRoleArn = aws.String(d.Get("secrets_manager_access_role_arn").(string)) + settings.SecretsManagerSecretId = aws.String(d.Get("secrets_manager_arn").(string)) + settings.DatabaseName = aws.String(d.Get("database_name").(string)) } else { - postgres_settings.Username = aws.String(d.Get("username").(string)) - postgres_settings.Password = aws.String(d.Get("password").(string)) - postgres_settings.ServerName = aws.String(d.Get("server_name").(string)) - postgres_settings.Port = aws.Int64(int64(d.Get("port").(int))) - postgres_settings.DatabaseName = aws.String(d.Get("database_name").(string)) + settings.Username = aws.String(d.Get("username").(string)) + settings.Password = aws.String(d.Get("password").(string)) + settings.ServerName = 
aws.String(d.Get("server_name").(string)) + settings.Port = aws.Int64(int64(d.Get("port").(int))) + settings.DatabaseName = aws.String(d.Get("database_name").(string)) // Set connection info in top-level namespace as well expandTopLevelConnectionInfo(d, input) } - input.PostgreSQLSettings = postgres_settings + + input.PostgreSQLSettings = settings case engineNameDynamoDB: input.DynamoDbSettings = &dms.DynamoDbSettings{ ServiceAccessRoleArn: aws.String(d.Get("service_access_role").(string)), @@ -2141,58 +2142,62 @@ func flattenRedshiftSettings(settings *dms.RedshiftSettings) []map[string]interf } func expandPostgreSQLSettings(tfMap map[string]interface{}) *dms.PostgreSQLSettings { - settings := &dms.PostgreSQLSettings{} + if tfMap == nil { + return nil + } + + apiObject := &dms.PostgreSQLSettings{} if v, ok := tfMap["after_connect_script"].(string); ok && v != "" { - settings.AfterConnectScript = aws.String(v) + apiObject.AfterConnectScript = aws.String(v) } if v, ok := tfMap["babelfish_database_name"].(string); ok && v != "" { - settings.BabelfishDatabaseName = aws.String(v) + apiObject.BabelfishDatabaseName = aws.String(v) } if v, ok := tfMap["capture_ddls"].(bool); ok { - settings.CaptureDdls = aws.Bool(v) + apiObject.CaptureDdls = aws.Bool(v) } if v, ok := tfMap["database_mode"].(string); ok && v != "" { - settings.DatabaseMode = aws.String(v) + apiObject.DatabaseMode = aws.String(v) } if v, ok := tfMap["ddl_artifacts_schema"].(string); ok && v != "" { - settings.DdlArtifactsSchema = aws.String(v) + apiObject.DdlArtifactsSchema = aws.String(v) } if v, ok := tfMap["execute_timeout"].(int); ok { - settings.ExecuteTimeout = aws.Int64(int64(v)) + apiObject.ExecuteTimeout = aws.Int64(int64(v)) } if v, ok := tfMap["fail_tasks_on_lob_truncation"].(bool); ok { - settings.FailTasksOnLobTruncation = aws.Bool(v) + apiObject.FailTasksOnLobTruncation = aws.Bool(v) } if v, ok := tfMap["heartbeat_enable"].(bool); ok { - settings.HeartbeatEnable = aws.Bool(v) + 
apiObject.HeartbeatEnable = aws.Bool(v) } if v, ok := tfMap["heartbeat_frequency"].(int); ok { - settings.HeartbeatFrequency = aws.Int64(int64(v)) + apiObject.HeartbeatFrequency = aws.Int64(int64(v)) } if v, ok := tfMap["heartbeat_schema"].(string); ok && v != "" { - settings.HeartbeatSchema = aws.String(v) + apiObject.HeartbeatSchema = aws.String(v) } if v, ok := tfMap["map_boolean_as_boolean"].(bool); ok { - settings.MapBooleanAsBoolean = aws.Bool(v) + apiObject.MapBooleanAsBoolean = aws.Bool(v) } if v, ok := tfMap["map_jsonb_as_clob"].(bool); ok { - settings.MapJsonbAsClob = aws.Bool(v) + apiObject.MapJsonbAsClob = aws.Bool(v) } if v, ok := tfMap["map_long_varchar_as"].(string); ok && v != "" { - settings.MapLongVarcharAs = aws.String(v) + apiObject.MapLongVarcharAs = aws.String(v) } if v, ok := tfMap["max_file_size"].(int); ok { - settings.MaxFileSize = aws.Int64(int64(v)) + apiObject.MaxFileSize = aws.Int64(int64(v)) } if v, ok := tfMap["plugin_name"].(string); ok && v != "" { - settings.PluginName = aws.String(v) + apiObject.PluginName = aws.String(v) } if v, ok := tfMap["slot_name"].(string); ok && v != "" { - settings.SlotName = aws.String(v) + apiObject.SlotName = aws.String(v) } - return settings + return apiObject } func expandS3Settings(tfMap map[string]interface{}) *dms.S3Settings { From 174b0ae0eb5442a678f867476b67eea53c6e87f8 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 11:24:54 -0500 Subject: [PATCH 119/438] r/aws_dms_endpoint: Add 'flattenPostgreSQLSettings'. 
--- internal/service/dms/endpoint.go | 62 ++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/internal/service/dms/endpoint.go b/internal/service/dms/endpoint.go index aaf6b2e6e47..746c958b0e9 100644 --- a/internal/service/dms/endpoint.go +++ b/internal/service/dms/endpoint.go @@ -1553,6 +1553,9 @@ func resourceEndpointSetState(d *schema.ResourceData, endpoint *dms.Endpoint) er } else { flattenTopLevelConnectionInfo(d, endpoint) } + if err := d.Set("postgres_settings", flattenPostgreSQLSettings(endpoint.PostgreSQLSettings)); err != nil { + return fmt.Errorf("setting postgres_settings: %w", err) + } case engineNameDynamoDB: if endpoint.DynamoDbSettings != nil { d.Set("service_access_role", endpoint.DynamoDbSettings.ServiceAccessRoleArn) @@ -2200,6 +2203,65 @@ func expandPostgreSQLSettings(tfMap map[string]interface{}) *dms.PostgreSQLSetti return apiObject } +func flattenPostgreSQLSettings(apiObject *dms.PostgreSQLSettings) map[string]interface{} { + if apiObject == nil { + return nil + } + + tfMap := map[string]interface{}{} + + if v := apiObject.AfterConnectScript; v != nil { + tfMap["after_connect_script"] = aws.StringValue(v) + } + if v := apiObject.BabelfishDatabaseName; v != nil { + tfMap["babelfish_database_name"] = aws.StringValue(v) + } + if v := apiObject.CaptureDdls; v != nil { + tfMap["capture_ddls"] = aws.BoolValue(v) + } + if v := apiObject.DatabaseMode; v != nil { + tfMap["database_mode"] = aws.StringValue(v) + } + if v := apiObject.DdlArtifactsSchema; v != nil { + tfMap["ddl_artifacts_schema"] = aws.StringValue(v) + } + if v := apiObject.ExecuteTimeout; v != nil { + tfMap["execute_timeout"] = aws.Int64Value(v) + } + if v := apiObject.FailTasksOnLobTruncation; v != nil { + tfMap["fail_tasks_on_lob_truncation"] = aws.BoolValue(v) + } + if v := apiObject.HeartbeatEnable; v != nil { + tfMap["heartbeat_enable"] = aws.BoolValue(v) + } + if v := apiObject.HeartbeatFrequency; v != nil { + tfMap["heartbeat_frequency"] = 
aws.Int64Value(v) + } + if v := apiObject.HeartbeatSchema; v != nil { + tfMap["heartbeat_schema"] = aws.StringValue(v) + } + if v := apiObject.MapBooleanAsBoolean; v != nil { + tfMap["map_boolean_as_boolean"] = aws.BoolValue(v) + } + if v := apiObject.MapJsonbAsClob; v != nil { + tfMap["map_jsonb_as_clob"] = aws.BoolValue(v) + } + if v := apiObject.MapLongVarcharAs; v != nil { + tfMap["map_long_varchar_as"] = aws.StringValue(v) + } + if v := apiObject.MaxFileSize; v != nil { + tfMap["max_file_size"] = aws.Int64Value(v) + } + if v := apiObject.PluginName; v != nil { + tfMap["plugin_name"] = aws.StringValue(v) + } + if v := apiObject.SlotName; v != nil { + tfMap["slot_name"] = aws.StringValue(v) + } + + return tfMap +} + func expandS3Settings(tfMap map[string]interface{}) *dms.S3Settings { if tfMap == nil { return nil From 82657c74637caeb0cccca39f81d0fc1a7b2cf7da Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 11:31:03 -0500 Subject: [PATCH 120/438] r/aws_s3_object: Fix 'Invalid configuration: region from ARN `us-east-1` does not match client region `aws-global` and UseArnRegion is `false`' when using S3 access point. --- internal/service/s3/delete.go | 20 ++++----- internal/service/s3/object.go | 70 ++++++++++++++++++++---------- internal/service/s3/object_copy.go | 2 +- internal/service/s3/object_test.go | 54 +++++++++++++++++++---- 4 files changed, 103 insertions(+), 43 deletions(-) diff --git a/internal/service/s3/delete.go b/internal/service/s3/delete.go index 8df4f6b8bce..30fd747f134 100644 --- a/internal/service/s3/delete.go +++ b/internal/service/s3/delete.go @@ -314,7 +314,7 @@ func newDeleteObjectVersionError(err types.Error) error { // Set `force` to `true` to override any S3 object lock protections on object lock enabled buckets. // Returns the number of objects deleted. // Use `emptyBucket` to delete all versions of all objects in a bucket. 
-func deleteAllObjectVersions(ctx context.Context, conn *s3.Client, bucket, key string, force, ignoreObjectErrors bool) (int64, error) { +func deleteAllObjectVersions(ctx context.Context, conn *s3.Client, bucket, key string, force, ignoreObjectErrors bool, optFns ...func(*s3.Options)) (int64, error) { if key == "" { return 0, errors.New("use `emptyBucket` to delete all versions of all objects in an S3 general purpose bucket") } @@ -328,7 +328,7 @@ func deleteAllObjectVersions(ctx context.Context, conn *s3.Client, bucket, key s pages := s3.NewListObjectVersionsPaginator(conn, input) for pages.HasMorePages() { - page, err := pages.NextPage(ctx) + page, err := pages.NextPage(ctx, optFns...) if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { break @@ -346,7 +346,7 @@ func deleteAllObjectVersions(ctx context.Context, conn *s3.Client, bucket, key s continue } - err := deleteObjectVersion(ctx, conn, bucket, objectKey, objectVersionID, force) + err := deleteObjectVersion(ctx, conn, bucket, objectKey, objectVersionID, force, optFns...) if err == nil { nObjects++ @@ -360,7 +360,7 @@ func deleteAllObjectVersions(ctx context.Context, conn *s3.Client, bucket, key s VersionId: aws.String(objectVersionID), } - output, err := conn.HeadObject(ctx, input) + output, err := conn.HeadObject(ctx, input, optFns...) if err != nil { log.Printf("[ERROR] Getting S3 Bucket (%s) Object (%s) Version (%s) metadata: %s", bucket, objectKey, objectVersionID, err) @@ -378,7 +378,7 @@ func deleteAllObjectVersions(ctx context.Context, conn *s3.Client, bucket, key s VersionId: aws.String(objectVersionID), } - _, err := conn.PutObjectLegalHold(ctx, input) + _, err := conn.PutObjectLegalHold(ctx, input, optFns...) if err != nil { log.Printf("[ERROR] Putting S3 Bucket (%s) Object (%s) Version(%s) legal hold: %s", bucket, objectKey, objectVersionID, err) @@ -387,7 +387,7 @@ func deleteAllObjectVersions(ctx context.Context, conn *s3.Client, bucket, key s } // Attempt to delete again. 
- err = deleteObjectVersion(ctx, conn, bucket, objectKey, objectVersionID, force) + err = deleteObjectVersion(ctx, conn, bucket, objectKey, objectVersionID, force, optFns...) if err != nil { lastErr = err @@ -419,7 +419,7 @@ func deleteAllObjectVersions(ctx context.Context, conn *s3.Client, bucket, key s pages = s3.NewListObjectVersionsPaginator(conn, input) for pages.HasMorePages() { - page, err := pages.NextPage(ctx) + page, err := pages.NextPage(ctx, optFns...) if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { break @@ -438,7 +438,7 @@ func deleteAllObjectVersions(ctx context.Context, conn *s3.Client, bucket, key s } // Delete markers have no object lock protections. - err := deleteObjectVersion(ctx, conn, bucket, deleteMarkerKey, deleteMarkerVersionID, false) + err := deleteObjectVersion(ctx, conn, bucket, deleteMarkerKey, deleteMarkerVersionID, false, optFns...) if err != nil { lastErr = err @@ -459,7 +459,7 @@ func deleteAllObjectVersions(ctx context.Context, conn *s3.Client, bucket, key s // deleteObjectVersion deletes a specific object version. // Set `force` to `true` to override any S3 object lock protections. -func deleteObjectVersion(ctx context.Context, conn *s3.Client, b, k, v string, force bool) error { +func deleteObjectVersion(ctx context.Context, conn *s3.Client, b, k, v string, force bool, optFns ...func(*s3.Options)) error { input := &s3.DeleteObjectInput{ Bucket: aws.String(b), Key: aws.String(k), @@ -473,7 +473,7 @@ func deleteObjectVersion(ctx context.Context, conn *s3.Client, b, k, v string, f } log.Printf("[INFO] Deleting S3 Bucket (%s) Object (%s) Version (%s)", b, k, v) - _, err := conn.DeleteObject(ctx, input) + _, err := conn.DeleteObject(ctx, input, optFns...) 
if err != nil { log.Printf("[WARN] Deleting S3 Bucket (%s) Object (%s) Version (%s): %s", b, k, v, err) diff --git a/internal/service/s3/object.go b/internal/service/s3/object.go index 0d5e7e72d37..7a4c7ed4cb8 100644 --- a/internal/service/s3/object.go +++ b/internal/service/s3/object.go @@ -17,6 +17,7 @@ import ( "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" @@ -252,13 +253,18 @@ func resourceObjectCreate(ctx context.Context, d *schema.ResourceData, meta inte func resourceObjectRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).S3Client(ctx) + var optFns []func(*s3.Options) bucket := d.Get("bucket").(string) if isDirectoryBucket(bucket) { conn = meta.(*conns.AWSClient).S3ExpressClient(ctx) } + // Via S3 access point: "Invalid configuration: region from ARN `us-east-1` does not match client region `aws-global` and UseArnRegion is `false`". + if arn.IsARN(bucket) && conn.Options().Region == names.GlobalRegionID { + optFns = append(optFns, func(o *s3.Options) { o.UseARNRegion = true }) + } key := sdkv1CompatibleCleanKey(d.Get("key").(string)) - output, err := findObjectByBucketAndKey(ctx, conn, bucket, key, "", d.Get("checksum_algorithm").(string)) + output, err := findObjectByBucketAndKey(ctx, conn, bucket, key, "", d.Get("checksum_algorithm").(string), optFns...) 
if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] S3 Object (%s) not found, removing from state", d.Id()) @@ -296,11 +302,11 @@ func resourceObjectRead(ctx context.Context, d *schema.ResourceData, meta interf d.Set("version_id", output.VersionId) d.Set("website_redirect", output.WebsiteRedirectLocation) - if err := resourceObjectSetKMS(ctx, d, meta, output.SSEKMSKeyId); err != nil { + if err := resourceObjectSetKMS(ctx, meta, d, aws.ToString(output.SSEKMSKeyId)); err != nil { return sdkdiag.AppendFromErr(diags, err) } - if tags, err := ObjectListTags(ctx, conn, bucket, key); err == nil { + if tags, err := ObjectListTags(ctx, conn, bucket, key, optFns...); err == nil { setTagsOut(ctx, Tags(tags)) } else if !tfawserr.ErrHTTPStatusCodeEquals(err, http.StatusNotImplemented) { // Directory buckets return HTTP status code 501, NotImplemented. return sdkdiag.AppendErrorf(diags, "listing tags for S3 Bucket (%s) Object (%s): %s", bucket, key, err) @@ -316,11 +322,16 @@ func resourceObjectUpdate(ctx context.Context, d *schema.ResourceData, meta inte } conn := meta.(*conns.AWSClient).S3Client(ctx) + var optFns []func(*s3.Options) bucket := d.Get("bucket").(string) if isDirectoryBucket(bucket) { conn = meta.(*conns.AWSClient).S3ExpressClient(ctx) } + // Via S3 access point: "Invalid configuration: region from ARN `us-east-1` does not match client region `aws-global` and UseArnRegion is `false`". + if arn.IsARN(bucket) && conn.Options().Region == names.GlobalRegionID { + optFns = append(optFns, func(o *s3.Options) { o.UseARNRegion = true }) + } key := sdkv1CompatibleCleanKey(d.Get("key").(string)) if d.HasChange("acl") { @@ -330,7 +341,7 @@ func resourceObjectUpdate(ctx context.Context, d *schema.ResourceData, meta inte Key: aws.String(key), } - _, err := conn.PutObjectAcl(ctx, input) + _, err := conn.PutObjectAcl(ctx, input, optFns...) 
if err != nil { return sdkdiag.AppendErrorf(diags, "putting S3 Object (%s) ACL: %s", d.Id(), err) @@ -346,7 +357,7 @@ func resourceObjectUpdate(ctx context.Context, d *schema.ResourceData, meta inte }, } - _, err := conn.PutObjectLegalHold(ctx, input) + _, err := conn.PutObjectLegalHold(ctx, input, optFns...) if err != nil { return sdkdiag.AppendErrorf(diags, "putting S3 Object (%s) legal hold: %s", d.Id(), err) @@ -373,7 +384,7 @@ func resourceObjectUpdate(ctx context.Context, d *schema.ResourceData, meta inte } } - _, err := conn.PutObjectRetention(ctx, input) + _, err := conn.PutObjectRetention(ctx, input, optFns...) if err != nil { return sdkdiag.AppendErrorf(diags, "putting S3 Object (%s) retention: %s", d.Id(), err) @@ -383,7 +394,7 @@ func resourceObjectUpdate(ctx context.Context, d *schema.ResourceData, meta inte if d.HasChange("tags_all") { o, n := d.GetChange("tags_all") - if err := ObjectUpdateTags(ctx, conn, bucket, key, o, n); err != nil { + if err := ObjectUpdateTags(ctx, conn, bucket, key, o, n, optFns...); err != nil { return sdkdiag.AppendErrorf(diags, "updating tags: %s", err) } } @@ -394,18 +405,23 @@ func resourceObjectUpdate(ctx context.Context, d *schema.ResourceData, meta inte func resourceObjectDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).S3Client(ctx) + var optFns []func(*s3.Options) bucket := d.Get("bucket").(string) if isDirectoryBucket(bucket) { conn = meta.(*conns.AWSClient).S3ExpressClient(ctx) } + // Via S3 access point: "Invalid configuration: region from ARN `us-east-1` does not match client region `aws-global` and UseArnRegion is `false`". 
+ if arn.IsARN(bucket) && conn.Options().Region == names.GlobalRegionID { + optFns = append(optFns, func(o *s3.Options) { o.UseARNRegion = true }) + } key := sdkv1CompatibleCleanKey(d.Get("key").(string)) var err error if _, ok := d.GetOk("version_id"); ok { - _, err = deleteAllObjectVersions(ctx, conn, bucket, key, d.Get("force_destroy").(bool), false) + _, err = deleteAllObjectVersions(ctx, conn, bucket, key, d.Get("force_destroy").(bool), false, optFns...) } else { - err = deleteObjectVersion(ctx, conn, bucket, key, "", false) + err = deleteObjectVersion(ctx, conn, bucket, key, "", false, optFns...) } if err != nil { @@ -437,14 +453,20 @@ func resourceObjectImport(ctx context.Context, d *schema.ResourceData, meta inte func resourceObjectUpload(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).S3Client(ctx) + var optFns []func(*s3.Options) + defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig + bucket := d.Get("bucket").(string) if isDirectoryBucket(bucket) { conn = meta.(*conns.AWSClient).S3ExpressClient(ctx) } - uploader := manager.NewUploader(conn) - defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig - tags := tftags.New(ctx, d.Get("tags").(map[string]interface{})) + // Via S3 access point: "Invalid configuration: region from ARN `us-east-1` does not match client region `aws-global` and UseArnRegion is `false`". 
+ if arn.IsARN(bucket) && conn.Options().Region == names.GlobalRegionID { + optFns = append(optFns, func(o *s3.Options) { o.UseARNRegion = true }) + } + uploader := manager.NewUploader(conn, manager.WithUploaderRequestOptions(optFns...)) + tags := tftags.New(ctx, d.Get("tags").(map[string]interface{})) if ignoreProviderDefaultTags(ctx, d) { tags = tags.RemoveDefaultConfig(defaultTagsConfig) } else { @@ -580,19 +602,19 @@ func resourceObjectUpload(ctx context.Context, d *schema.ResourceData, meta inte return append(diags, resourceObjectRead(ctx, d, meta)...) } -func resourceObjectSetKMS(ctx context.Context, d *schema.ResourceData, meta interface{}, sseKMSKeyId *string) error { - // Only set non-default KMS key ID (one that doesn't match default) - if sseKMSKeyId != nil { - // retrieve S3 KMS Default Master Key - conn := meta.(*conns.AWSClient).KMSConn(ctx) - keyMetadata, err := kms.FindKeyByID(ctx, conn, DefaultKMSKeyAlias) +func resourceObjectSetKMS(ctx context.Context, meta interface{}, d *schema.ResourceData, sseKMSKeyID string) error { + // Only set non-default KMS key ID (one that doesn't match default). + if sseKMSKeyID != "" { + // Read S3 KMS default master key. 
+ keyMetadata, err := kms.FindKeyByID(ctx, meta.(*conns.AWSClient).KMSConn(ctx), DefaultKMSKeyAlias) + if err != nil { - return fmt.Errorf("Failed to describe default S3 KMS key (%s): %s", DefaultKMSKeyAlias, err) + return fmt.Errorf("reading default S3 KMS key (%s): %s", DefaultKMSKeyAlias, err) } - if kmsKeyID := aws.ToString(sseKMSKeyId); kmsKeyID != aws.ToString(keyMetadata.Arn) { - log.Printf("[DEBUG] S3 object is encrypted using a non-default KMS Key ID: %s", kmsKeyID) - d.Set("kms_key_id", sseKMSKeyId) + if sseKMSKeyID != aws.ToString(keyMetadata.Arn) { + log.Printf("[DEBUG] S3 object is encrypted using a non-default KMS key: %s", sseKMSKeyID) + d.Set("kms_key_id", sseKMSKeyID) } } @@ -651,7 +673,7 @@ func hasObjectContentChanges(d verify.ResourceDiffer) bool { return false } -func findObjectByBucketAndKey(ctx context.Context, conn *s3.Client, bucket, key, etag, checksumAlgorithm string) (*s3.HeadObjectOutput, error) { +func findObjectByBucketAndKey(ctx context.Context, conn *s3.Client, bucket, key, etag, checksumAlgorithm string, optFns ...func(*s3.Options)) (*s3.HeadObjectOutput, error) { input := &s3.HeadObjectInput{ Bucket: aws.String(bucket), Key: aws.String(key), @@ -663,7 +685,7 @@ func findObjectByBucketAndKey(ctx context.Context, conn *s3.Client, bucket, key, input.IfMatch = aws.String(etag) } - return findObject(ctx, conn, input) + return findObject(ctx, conn, input, optFns...) 
} func findObject(ctx context.Context, conn *s3.Client, input *s3.HeadObjectInput, optFns ...func(*s3.Options)) (*s3.HeadObjectOutput, error) { diff --git a/internal/service/s3/object_copy.go b/internal/service/s3/object_copy.go index d38344ed3f0..92fa8f03f6c 100644 --- a/internal/service/s3/object_copy.go +++ b/internal/service/s3/object_copy.go @@ -381,7 +381,7 @@ func resourceObjectCopyRead(ctx context.Context, d *schema.ResourceData, meta in d.Set("version_id", output.VersionId) d.Set("website_redirect", output.WebsiteRedirectLocation) - if err := resourceObjectSetKMS(ctx, d, meta, output.SSEKMSKeyId); err != nil { + if err := resourceObjectSetKMS(ctx, meta, d, aws.ToString(output.SSEKMSKeyId)); err != nil { return sdkdiag.AppendFromErr(diags, err) } diff --git a/internal/service/s3/object_test.go b/internal/service/s3/object_test.go index 28cdbe57514..6c1917bb19b 100644 --- a/internal/service/s3/object_test.go +++ b/internal/service/s3/object_test.go @@ -15,6 +15,7 @@ import ( "time" "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/google/go-cmp/cmp" @@ -1855,7 +1856,12 @@ func testAccCheckObjectDestroy(ctx context.Context) resource.TestCheckFunc { conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) } - _, err := tfs3.FindObjectByBucketAndKey(ctx, conn, rs.Primary.Attributes["bucket"], tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"]), rs.Primary.Attributes["etag"], rs.Primary.Attributes["checksum_algorithm"]) + var optFns []func(*s3.Options) + if arn.IsARN(rs.Primary.Attributes["bucket"]) && conn.Options().Region == names.GlobalRegionID { + optFns = append(optFns, func(o *s3.Options) { o.UseARNRegion = true }) + } + + _, err := tfs3.FindObjectByBucketAndKey(ctx, conn, rs.Primary.Attributes["bucket"], tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"]), rs.Primary.Attributes["etag"], 
rs.Primary.Attributes["checksum_algorithm"], optFns...) if tfresource.NotFound(err) { continue @@ -1884,13 +1890,18 @@ func testAccCheckObjectExists(ctx context.Context, n string, v *s3.GetObjectOutp conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) } + var optFns []func(*s3.Options) + if arn.IsARN(rs.Primary.Attributes["bucket"]) && conn.Options().Region == names.GlobalRegionID { + optFns = append(optFns, func(o *s3.Options) { o.UseARNRegion = true }) + } + input := &s3.GetObjectInput{ Bucket: aws.String(rs.Primary.Attributes["bucket"]), Key: aws.String(tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"])), IfMatch: aws.String(rs.Primary.Attributes["etag"]), } - output, err := conn.GetObject(ctx, input) + output, err := conn.GetObject(ctx, input, optFns...) if err != nil { return err @@ -1927,10 +1938,17 @@ func testAccCheckObjectACL(ctx context.Context, n string, want []string) resourc conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) } - output, err := conn.GetObjectAcl(ctx, &s3.GetObjectAclInput{ + var optFns []func(*s3.Options) + if arn.IsARN(rs.Primary.Attributes["bucket"]) && conn.Options().Region == names.GlobalRegionID { + optFns = append(optFns, func(o *s3.Options) { o.UseARNRegion = true }) + } + + input := &s3.GetObjectAclInput{ Bucket: aws.String(rs.Primary.Attributes["bucket"]), Key: aws.String(tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"])), - }) + } + + output, err := conn.GetObjectAcl(ctx, input, optFns...) 
if err != nil { return err @@ -1959,7 +1977,12 @@ func testAccCheckObjectStorageClass(ctx context.Context, n, want string) resourc conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) } - output, err := tfs3.FindObjectByBucketAndKey(ctx, conn, rs.Primary.Attributes["bucket"], tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"]), "", "") + var optFns []func(*s3.Options) + if arn.IsARN(rs.Primary.Attributes["bucket"]) && conn.Options().Region == names.GlobalRegionID { + optFns = append(optFns, func(o *s3.Options) { o.UseARNRegion = true }) + } + + output, err := tfs3.FindObjectByBucketAndKey(ctx, conn, rs.Primary.Attributes["bucket"], tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"]), "", "", optFns...) if err != nil { return err @@ -1989,7 +2012,12 @@ func testAccCheckObjectSSE(ctx context.Context, n, want string) resource.TestChe conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) } - output, err := tfs3.FindObjectByBucketAndKey(ctx, conn, rs.Primary.Attributes["bucket"], tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"]), "", "") + var optFns []func(*s3.Options) + if arn.IsARN(rs.Primary.Attributes["bucket"]) && conn.Options().Region == names.GlobalRegionID { + optFns = append(optFns, func(o *s3.Options) { o.UseARNRegion = true }) + } + + output, err := tfs3.FindObjectByBucketAndKey(ctx, conn, rs.Primary.Attributes["bucket"], tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"]), "", "", optFns...) 
if err != nil { return err @@ -2028,7 +2056,12 @@ func testAccCheckObjectUpdateTags(ctx context.Context, n string, oldTags, newTag conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) } - return tfs3.ObjectUpdateTags(ctx, conn, rs.Primary.Attributes["bucket"], tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"]), oldTags, newTags) + var optFns []func(*s3.Options) + if arn.IsARN(rs.Primary.Attributes["bucket"]) && conn.Options().Region == names.GlobalRegionID { + optFns = append(optFns, func(o *s3.Options) { o.UseARNRegion = true }) + } + + return tfs3.ObjectUpdateTags(ctx, conn, rs.Primary.Attributes["bucket"], tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"]), oldTags, newTags, optFns...) } } @@ -2041,7 +2074,12 @@ func testAccCheckObjectCheckTags(ctx context.Context, n string, expectedTags map conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) } - got, err := tfs3.ObjectListTags(ctx, conn, rs.Primary.Attributes["bucket"], tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"])) + var optFns []func(*s3.Options) + if arn.IsARN(rs.Primary.Attributes["bucket"]) && conn.Options().Region == names.GlobalRegionID { + optFns = append(optFns, func(o *s3.Options) { o.UseARNRegion = true }) + } + + got, err := tfs3.ObjectListTags(ctx, conn, rs.Primary.Attributes["bucket"], tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"]), optFns...) if err != nil { return err } From 0fbe14782206debef8102cedc68bc136cc5b29b9 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 11:52:58 -0500 Subject: [PATCH 121/438] Fix 'flattenPostgreSQLSettings' return value. 
--- internal/service/dms/endpoint.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/dms/endpoint.go b/internal/service/dms/endpoint.go index 746c958b0e9..fd284168cee 100644 --- a/internal/service/dms/endpoint.go +++ b/internal/service/dms/endpoint.go @@ -2203,7 +2203,7 @@ func expandPostgreSQLSettings(tfMap map[string]interface{}) *dms.PostgreSQLSetti return apiObject } -func flattenPostgreSQLSettings(apiObject *dms.PostgreSQLSettings) map[string]interface{} { +func flattenPostgreSQLSettings(apiObject *dms.PostgreSQLSettings) []map[string]interface{} { if apiObject == nil { return nil } @@ -2259,7 +2259,7 @@ func flattenPostgreSQLSettings(apiObject *dms.PostgreSQLSettings) map[string]int tfMap["slot_name"] = aws.StringValue(v) } - return tfMap + return []map[string]interface{}{tfMap} } func expandS3Settings(tfMap map[string]interface{}) *dms.S3Settings { From 2911b4f329b4d9cb0413e2edbc6ded90a3573395 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 11:54:00 -0500 Subject: [PATCH 122/438] Fix terrafmt errors. 
--- internal/service/dms/endpoint_test.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/internal/service/dms/endpoint_test.go b/internal/service/dms/endpoint_test.go index 93910c75b3e..8dc8d0d08d1 100644 --- a/internal/service/dms/endpoint_test.go +++ b/internal/service/dms/endpoint_test.go @@ -3910,12 +3910,13 @@ resource "aws_dms_endpoint" "test" { database_name = "tftest" ssl_mode = "require" extra_connection_attributes = "" + postgres_settings { - after_connect_script = "SET search_path TO pg_catalog,public;" - babelfish_database_name = "babelfish" - database_mode = "babelfish" - execute_timeout = 100 - max_file_size = 1024 + after_connect_script = "SET search_path TO pg_catalog,public;" + babelfish_database_name = "babelfish" + database_mode = "babelfish" + execute_timeout = 100 + max_file_size = 1024 } } `, rName) From 2e68eda235e3c5c810718ad837e4a3437d1f8918 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 11:54:58 -0500 Subject: [PATCH 123/438] Add 'TestAccS3ObjectCopy_basicViaAccessPoint'. 
--- internal/service/s3/object_copy_test.go | 126 ++++++++++++++++++++++++ 1 file changed, 126 insertions(+) diff --git a/internal/service/s3/object_copy_test.go b/internal/service/s3/object_copy_test.go index fd3f0469937..aaea71cd0b4 100644 --- a/internal/service/s3/object_copy_test.go +++ b/internal/service/s3/object_copy_test.go @@ -509,6 +509,80 @@ func TestAccS3ObjectCopy_directoryBucket(t *testing.T) { }) } +func TestAccS3ObjectCopy_basicViaAccessPoint(t *testing.T) { + ctx := acctest.Context(t) + rName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_s3_object_copy.test" + sourceName := "aws_s3_object.source" + sourceKey := "source" + targetKey := "target" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckObjectCopyDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccObjectCopyConfig_basicViaAccessPoint(rName1, sourceKey, rName2, targetKey), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckObjectCopyExists(ctx, resourceName), + resource.TestCheckNoResourceAttr(resourceName, "acl"), + resource.TestCheckResourceAttrSet(resourceName, "bucket"), + resource.TestCheckResourceAttr(resourceName, "bucket_key_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "cache_control", ""), + resource.TestCheckNoResourceAttr(resourceName, "checksum_algorithm"), + resource.TestCheckResourceAttr(resourceName, "checksum_crc32", ""), + resource.TestCheckResourceAttr(resourceName, "checksum_crc32c", ""), + resource.TestCheckResourceAttr(resourceName, "checksum_sha1", ""), + resource.TestCheckResourceAttr(resourceName, "checksum_sha256", ""), + resource.TestCheckResourceAttr(resourceName, "content_disposition", ""), + 
resource.TestCheckResourceAttr(resourceName, "content_encoding", ""), + resource.TestCheckResourceAttr(resourceName, "content_language", ""), + resource.TestCheckResourceAttr(resourceName, "content_type", "application/octet-stream"), + resource.TestCheckNoResourceAttr(resourceName, "copy_if_match"), + resource.TestCheckNoResourceAttr(resourceName, "copy_if_modified_since"), + resource.TestCheckNoResourceAttr(resourceName, "copy_if_none_match"), + resource.TestCheckNoResourceAttr(resourceName, "copy_if_unmodified_since"), + resource.TestCheckResourceAttr(resourceName, "customer_algorithm", ""), + resource.TestCheckNoResourceAttr(resourceName, "customer_key"), + resource.TestCheckResourceAttr(resourceName, "customer_key_md5", ""), + resource.TestCheckResourceAttrPair(resourceName, "etag", sourceName, "etag"), + resource.TestCheckNoResourceAttr(resourceName, "expected_bucket_owner"), + resource.TestCheckNoResourceAttr(resourceName, "expected_source_bucket_owner"), + resource.TestCheckResourceAttr(resourceName, "expiration", ""), + resource.TestCheckNoResourceAttr(resourceName, "expires"), + resource.TestCheckResourceAttr(resourceName, "force_destroy", "false"), + resource.TestCheckResourceAttr(resourceName, "grant.#", "0"), + resource.TestCheckResourceAttr(resourceName, "key", targetKey), + resource.TestCheckResourceAttr(resourceName, "kms_encryption_context", ""), + resource.TestCheckResourceAttr(resourceName, "kms_key_id", ""), + resource.TestCheckResourceAttrSet(resourceName, "last_modified"), + resource.TestCheckResourceAttr(resourceName, "metadata.%", "0"), + resource.TestCheckNoResourceAttr(resourceName, "metadata_directive"), + resource.TestCheckResourceAttr(resourceName, "object_lock_legal_hold_status", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_mode", ""), + resource.TestCheckResourceAttr(resourceName, "object_lock_retain_until_date", ""), + resource.TestCheckResourceAttr(resourceName, "request_charged", "false"), + 
resource.TestCheckNoResourceAttr(resourceName, "request_payer"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption", "AES256"), + resource.TestCheckResourceAttrSet(resourceName, "source"), + resource.TestCheckNoResourceAttr(resourceName, "source_customer_algorithm"), + resource.TestCheckNoResourceAttr(resourceName, "source_customer_key"), + resource.TestCheckNoResourceAttr(resourceName, "source_customer_key_md5"), + resource.TestCheckResourceAttrSet(resourceName, "source_version_id"), + resource.TestCheckResourceAttr(resourceName, "storage_class", "STANDARD"), + resource.TestCheckNoResourceAttr(resourceName, "tagging_directive"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrSet(resourceName, "version_id"), + resource.TestCheckResourceAttr(resourceName, "website_redirect", ""), + ), + }, + }, + }) +} + func testAccCheckObjectCopyDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { for _, rs := range s.RootModule().Resources { @@ -822,3 +896,55 @@ resource "aws_s3_object_copy" "test" { } `, sourceBucket, sourceKey, targetBucket, targetKey)) } + +func testAccObjectCopyConfig_basicViaAccessPoint(sourceBucket, sourceKey, targetBucket, targetKey string) string { + return fmt.Sprintf(` +resource "aws_s3_bucket" "source" { + bucket = %[1]q + + force_destroy = true +} + +resource "aws_s3_bucket_versioning" "source" { + bucket = aws_s3_bucket.source.id + versioning_configuration { + status = "Enabled" + } +} + +resource "aws_s3_access_point" "source" { + # Must have bucket versioning enabled first + bucket = aws_s3_bucket_versioning.source.bucket + name = %[1]q +} + +resource "aws_s3_bucket" "target" { + bucket = %[3]q +} + +resource "aws_s3_bucket_versioning" "target" { + bucket = aws_s3_bucket.target.id + versioning_configuration { + status = "Enabled" + } +} + +resource "aws_s3_access_point" "target" { + # Must have bucket versioning enabled first + bucket = 
aws_s3_bucket_versioning.target.bucket + name = %[3]q +} + +resource "aws_s3_object" "source" { + bucket = aws_s3_bucket.source.bucket + key = %[2]q + content = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" +} + +resource "aws_s3_object_copy" "test" { + bucket = aws_s3_access_point.target.arn + key = %[4]q + source = "${aws_s3_access_point.source.arn}/object/${aws_s3_object.source.key}" +} +`, sourceBucket, sourceKey, targetBucket, targetKey) +} From b9ace538035bc9c7b11093f6b2c4907d3a36a055 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 12:24:01 -0500 Subject: [PATCH 124/438] Tweak 'stopReplicationTask'. --- internal/service/dms/endpoint.go | 3 --- internal/service/dms/replication_task.go | 6 +++++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/internal/service/dms/endpoint.go b/internal/service/dms/endpoint.go index fd284168cee..0d597410865 100644 --- a/internal/service/dms/endpoint.go +++ b/internal/service/dms/endpoint.go @@ -1716,9 +1716,6 @@ func stopEndpointReplicationTasks(ctx context.Context, conn *dms.DatabaseMigrati switch aws.StringValue(task.Status) { case replicationTaskStatusRunning: err := stopReplicationTask(ctx, rtID, conn) - if tfawserr.ErrCodeEquals(err, dms.ErrCodeInvalidResourceStateFault) { - continue - } if err != nil { return stoppedTasks, err diff --git a/internal/service/dms/replication_task.go b/internal/service/dms/replication_task.go index 74dee1dae40..aeb3ae87f0f 100644 --- a/internal/service/dms/replication_task.go +++ b/internal/service/dms/replication_task.go @@ -387,7 +387,7 @@ func startReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationServic } func stopReplicationTask(ctx context.Context, id string, conn *dms.DatabaseMigrationService) error { - log.Printf("[DEBUG] Stopping DMS Replication Task: (%s)", id) + log.Printf("[DEBUG] Stopping DMS Replication Task: %s", id) task, err := FindReplicationTaskByID(ctx, conn, id) if err != nil { @@ -402,6 +402,10 @@ func 
stopReplicationTask(ctx context.Context, id string, conn *dms.DatabaseMigra ReplicationTaskArn: task.ReplicationTaskArn, }) + if tfawserr.ErrMessageContains(err, dms.ErrCodeInvalidResourceStateFault, "is currently not running") { + return nil + } + if err != nil { return fmt.Errorf("stopping DMS Replication Task (%s): %w", id, err) } From c97b30f44e1a64c514689f2c2fd333be7de85632 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 12:24:19 -0500 Subject: [PATCH 125/438] r/aws_s3_object_copy: Fix 'Invalid configuration: region from ARN `us-east-1` does not match client region `aws-global` and UseArnRegion is `false`' when using S3 access point. --- internal/service/s3/object.go | 3 ++- internal/service/s3/object_copy.go | 29 ++++++++++++++++++++----- internal/service/s3/object_copy_test.go | 16 ++++++++++++-- 3 files changed, 39 insertions(+), 9 deletions(-) diff --git a/internal/service/s3/object.go b/internal/service/s3/object.go index 7a4c7ed4cb8..c3723f0e8ca 100644 --- a/internal/service/s3/object.go +++ b/internal/service/s3/object.go @@ -464,7 +464,6 @@ func resourceObjectUpload(ctx context.Context, d *schema.ResourceData, meta inte if arn.IsARN(bucket) && conn.Options().Region == names.GlobalRegionID { optFns = append(optFns, func(o *s3.Options) { o.UseARNRegion = true }) } - uploader := manager.NewUploader(conn, manager.WithUploaderRequestOptions(optFns...)) tags := tftags.New(ctx, d.Get("tags").(map[string]interface{})) if ignoreProviderDefaultTags(ctx, d) { @@ -591,6 +590,8 @@ func resourceObjectUpload(ctx context.Context, d *schema.ResourceData, meta inte input.ChecksumAlgorithm = types.ChecksumAlgorithmCrc32 } + uploader := manager.NewUploader(conn, manager.WithUploaderRequestOptions(optFns...)) + if _, err := uploader.Upload(ctx, input); err != nil { return sdkdiag.AppendErrorf(diags, "uploading S3 Object (%s) to Bucket (%s): %s", aws.ToString(input.Key), aws.ToString(input.Bucket), err) } diff --git a/internal/service/s3/object_copy.go 
b/internal/service/s3/object_copy.go index 92fa8f03f6c..d9f6efc2739 100644 --- a/internal/service/s3/object_copy.go +++ b/internal/service/s3/object_copy.go @@ -13,6 +13,7 @@ import ( "strings" "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" @@ -332,13 +333,18 @@ func resourceObjectCopyCreate(ctx context.Context, d *schema.ResourceData, meta func resourceObjectCopyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).S3Client(ctx) + var optFns []func(*s3.Options) bucket := d.Get("bucket").(string) if isDirectoryBucket(bucket) { conn = meta.(*conns.AWSClient).S3ExpressClient(ctx) } + // Via S3 access point: "Invalid configuration: region from ARN `us-east-1` does not match client region `aws-global` and UseArnRegion is `false`". + if arn.IsARN(bucket) && conn.Options().Region == names.GlobalRegionID { + optFns = append(optFns, func(o *s3.Options) { o.UseARNRegion = true }) + } key := sdkv1CompatibleCleanKey(d.Get("key").(string)) - output, err := findObjectByBucketAndKey(ctx, conn, bucket, key, "", d.Get("checksum_algorithm").(string)) + output, err := findObjectByBucketAndKey(ctx, conn, bucket, key, "", d.Get("checksum_algorithm").(string), optFns...) 
if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] S3 Object (%s) not found, removing from state", d.Id()) @@ -385,7 +391,7 @@ func resourceObjectCopyRead(ctx context.Context, d *schema.ResourceData, meta in return sdkdiag.AppendFromErr(diags, err) } - if tags, err := ObjectListTags(ctx, conn, bucket, key); err == nil { + if tags, err := ObjectListTags(ctx, conn, bucket, key, optFns...); err == nil { setTagsOut(ctx, Tags(tags)) } else if !tfawserr.ErrHTTPStatusCodeEquals(err, http.StatusNotImplemented) { // Directory buckets return HTTP status code 501, NotImplemented. return sdkdiag.AppendErrorf(diags, "listing tags for S3 Bucket (%s) Object (%s): %s", bucket, key, err) @@ -456,18 +462,23 @@ func resourceObjectCopyUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceObjectCopyDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).S3Client(ctx) + var optFns []func(*s3.Options) bucket := d.Get("bucket").(string) if isDirectoryBucket(bucket) { conn = meta.(*conns.AWSClient).S3ExpressClient(ctx) } + // Via S3 access point: "Invalid configuration: region from ARN `us-east-1` does not match client region `aws-global` and UseArnRegion is `false`". + if arn.IsARN(bucket) && conn.Options().Region == names.GlobalRegionID { + optFns = append(optFns, func(o *s3.Options) { o.UseARNRegion = true }) + } key := sdkv1CompatibleCleanKey(d.Get("key").(string)) var err error if _, ok := d.GetOk("version_id"); ok { - _, err = deleteAllObjectVersions(ctx, conn, bucket, key, d.Get("force_destroy").(bool), false) + _, err = deleteAllObjectVersions(ctx, conn, bucket, key, d.Get("force_destroy").(bool), false, optFns...) } else { - err = deleteObjectVersion(ctx, conn, bucket, key, "", false) + err = deleteObjectVersion(ctx, conn, bucket, key, "", false, optFns...) 
} if err != nil { @@ -479,11 +490,17 @@ func resourceObjectCopyDelete(ctx context.Context, d *schema.ResourceData, meta func resourceObjectCopyDoCopy(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).S3Client(ctx) + var optFns []func(*s3.Options) + defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig + bucket := d.Get("bucket").(string) if isDirectoryBucket(bucket) { conn = meta.(*conns.AWSClient).S3ExpressClient(ctx) } - defaultTagsConfig := meta.(*conns.AWSClient).DefaultTagsConfig + // Via S3 access point: "Invalid configuration: region from ARN `us-east-1` does not match client region `aws-global` and UseArnRegion is `false`". + if arn.IsARN(bucket) && conn.Options().Region == names.GlobalRegionID { + optFns = append(optFns, func(o *s3.Options) { o.UseARNRegion = true }) + } tags := defaultTagsConfig.MergeTags(tftags.New(ctx, d.Get("tags").(map[string]interface{}))) input := &s3.CopyObjectInput{ @@ -639,7 +656,7 @@ func resourceObjectCopyDoCopy(ctx context.Context, d *schema.ResourceData, meta input.WebsiteRedirectLocation = aws.String(v.(string)) } - output, err := conn.CopyObject(ctx, input) + output, err := conn.CopyObject(ctx, input, optFns...) 
if err != nil { return sdkdiag.AppendErrorf(diags, "copying %s to S3 Bucket (%s) Object (%s): %s", aws.ToString(input.CopySource), aws.ToString(input.Bucket), aws.ToString(input.Key), err) diff --git a/internal/service/s3/object_copy_test.go b/internal/service/s3/object_copy_test.go index aaea71cd0b4..adbfd616752 100644 --- a/internal/service/s3/object_copy_test.go +++ b/internal/service/s3/object_copy_test.go @@ -8,6 +8,8 @@ import ( "fmt" "testing" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/service/s3" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -595,7 +597,12 @@ func testAccCheckObjectCopyDestroy(ctx context.Context) resource.TestCheckFunc { conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) } - _, err := tfs3.FindObjectByBucketAndKey(ctx, conn, rs.Primary.Attributes["bucket"], tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"]), rs.Primary.Attributes["etag"], "") + var optFns []func(*s3.Options) + if arn.IsARN(rs.Primary.Attributes["bucket"]) && conn.Options().Region == names.GlobalRegionID { + optFns = append(optFns, func(o *s3.Options) { o.UseARNRegion = true }) + } + + _, err := tfs3.FindObjectByBucketAndKey(ctx, conn, rs.Primary.Attributes["bucket"], tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"]), rs.Primary.Attributes["etag"], "", optFns...) 
if tfresource.NotFound(err) { continue @@ -624,7 +631,12 @@ func testAccCheckObjectCopyExists(ctx context.Context, n string) resource.TestCh conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) } - _, err := tfs3.FindObjectByBucketAndKey(ctx, conn, rs.Primary.Attributes["bucket"], tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"]), rs.Primary.Attributes["etag"], "") + var optFns []func(*s3.Options) + if arn.IsARN(rs.Primary.Attributes["bucket"]) && conn.Options().Region == names.GlobalRegionID { + optFns = append(optFns, func(o *s3.Options) { o.UseARNRegion = true }) + } + + _, err := tfs3.FindObjectByBucketAndKey(ctx, conn, rs.Primary.Attributes["bucket"], tfs3.SDKv1CompatibleCleanKey(rs.Primary.Attributes["key"]), rs.Primary.Attributes["etag"], "", optFns...) return err } From af06aa66fc9ce6234b70e698f505a149529ab3f4 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 12:30:30 -0500 Subject: [PATCH 126/438] d/aws_dms_endpoint: Add `postgres_settings` attribute. 
--- .changelog/34724.txt | 4 ++ internal/service/dms/endpoint_data_source.go | 72 ++++++++++++++++++++ 2 files changed, 76 insertions(+) diff --git a/.changelog/34724.txt b/.changelog/34724.txt index 2f038716a38..46c34e9d4a7 100644 --- a/.changelog/34724.txt +++ b/.changelog/34724.txt @@ -1,3 +1,7 @@ ```release-note:enhancement resource/aws_dms_endpoint: Add `postgres_settings` configuration block +``` + +```release-note:enhancement +data-source/aws_dms_endpoint: Add `postgres_settings` attribute ``` \ No newline at end of file diff --git a/internal/service/dms/endpoint_data_source.go b/internal/service/dms/endpoint_data_source.go index c6d4c3f95a4..94ddeb07a50 100644 --- a/internal/service/dms/endpoint_data_source.go +++ b/internal/service/dms/endpoint_data_source.go @@ -240,6 +240,78 @@ func DataSourceEndpoint() *schema.Resource { Type: schema.TypeInt, Computed: true, }, + "postgres_settings": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "after_connect_script": { + Type: schema.TypeString, + Computed: true, + }, + "babelfish_database_name": { + Type: schema.TypeString, + Computed: true, + }, + "capture_ddls": { + Type: schema.TypeBool, + Computed: true, + }, + "database_mode": { + Type: schema.TypeString, + Computed: true, + }, + "ddl_artifacts_schema": { + Type: schema.TypeString, + Computed: true, + }, + "execute_timeout": { + Type: schema.TypeInt, + Computed: true, + }, + "fail_tasks_on_lob_truncation": { + Type: schema.TypeBool, + Computed: true, + }, + "heartbeat_enable": { + Type: schema.TypeBool, + Computed: true, + }, + "heartbeat_frequency": { + Type: schema.TypeInt, + Computed: true, + }, + "heartbeat_schema": { + Type: schema.TypeString, + Computed: true, + }, + "map_boolean_as_boolean": { + Type: schema.TypeBool, + Computed: true, + }, + "map_jsonb_as_clob": { + Type: schema.TypeBool, + Computed: true, + }, + "map_long_varchar_as": { + Type: schema.TypeString, + Computed: true, + }, + 
"max_file_size": { + Type: schema.TypeInt, + Computed: true, + }, + "plugin_name": { + Type: schema.TypeString, + Computed: true, + }, + "slot_name": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, "redis_settings": { Type: schema.TypeList, Computed: true, From ac7857a69c359d92323129704bbab2c748892ef8 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 13:15:05 -0500 Subject: [PATCH 127/438] Suppress 'ci.semgrep.migrate.aws-api-context' for 'conn.Options()'. --- .ci/semgrep/migrate/context.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.ci/semgrep/migrate/context.yml b/.ci/semgrep/migrate/context.yml index e8674d7eaa8..bb28f7e8213 100644 --- a/.ci/semgrep/migrate/context.yml +++ b/.ci/semgrep/migrate/context.yml @@ -27,6 +27,7 @@ rules: - pattern-not: tfkafkaconnect.$API() - pattern-not: conn.Handlers.$X(...) - pattern-not: conn.Handlers.$X.$Y(...) + - pattern-not: conn.Options() severity: ERROR - id: context-todo languages: [go] From 470dcdb16a063313fe9030b09031f319bfc0a8be Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 13:18:36 -0500 Subject: [PATCH 128/438] testAccCheckBucketAddObjects: Use 'S3ExpressClient' if necessary. 
--- internal/service/s3/bucket_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index 0404361ae56..bddb085ee2c 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -2628,7 +2628,11 @@ func testAccCheckBucketExistsWithProvider(ctx context.Context, n string, provide func testAccCheckBucketAddObjects(ctx context.Context, n string, keys ...string) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + if tfs3.IsDirectoryBucket(rs.Primary.ID) { + conn = acctest.Provider.Meta().(*conns.AWSClient).S3ExpressClient(ctx) + } for _, key := range keys { _, err := conn.PutObject(ctx, &s3_sdkv2.PutObjectInput{ @@ -2648,6 +2652,7 @@ func testAccCheckBucketAddObjects(ctx context.Context, n string, keys ...string) func testAccCheckBucketAddObjectsWithLegalHold(ctx context.Context, n string, keys ...string) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx) for _, key := range keys { From bf90c78a07b82c48d6c5a4f37d2421507008c7ed Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 13:41:00 -0500 Subject: [PATCH 129/438] findBucket: Explain need for 'errs.Contains'. --- internal/service/s3/bucket.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 37d4a0c4244..e26e7024b3b 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -1441,6 +1441,8 @@ func findBucket(ctx context.Context, conn *s3_sdkv2.Client, bucket string, optFn _, err := conn.HeadBucket(ctx, input, optFns...) 
+ // For directory buckets that no longer exist it's the CreateSession call invoked by HeadBucket that returns "NoSuchBucket", + // and that error code is flattend into HeadBucket's error message -- hence the 'errs.Contains' call. if tfawserr_sdkv2.ErrHTTPStatusCodeEquals(err, http.StatusNotFound) || tfawserr_sdkv2.ErrCodeEquals(err, errCodeNoSuchBucket) || errs.Contains(err, errCodeNoSuchBucket) { return &retry.NotFoundError{ LastError: err, From acf260480c8e971faea2c9229568f41e14d58efb Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 13:59:28 -0500 Subject: [PATCH 130/438] r/aws_s3_object: Remove FIXMEs from acceptance tests. --- internal/service/s3/object_test.go | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/internal/service/s3/object_test.go b/internal/service/s3/object_test.go index 6c1917bb19b..96a0f905192 100644 --- a/internal/service/s3/object_test.go +++ b/internal/service/s3/object_test.go @@ -1710,9 +1710,7 @@ func TestAccS3Object_directoryBucket(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - // FIXME "Error running post-test destroy, there may be dangling resources: operation error S3: HeadObject, https response error StatusCode: 403, RequestID: 0033eada6b00018c1804fda905093646dd76f12a, HostID: SfKUL8OB, api error Forbidden: Forbidden" - // CheckDestroy: testAccCheckObjectDestroy(ctx), - CheckDestroy: acctest.CheckDestroyNoop, + CheckDestroy: testAccCheckObjectDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccObjectConfig_directoryBucket(rName), @@ -1783,9 +1781,7 @@ func TestAccS3Object_DirectoryBucket_disappears(t *testing.T) { // nosemgrep:ci. 
PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - // FIXME "Error running post-test destroy, there may be dangling resources: operation error S3: HeadObject, https response error StatusCode: 403, RequestID: 0033eada6b00018c1804fda905093646dd76f12a, HostID: SfKUL8OB, api error Forbidden: Forbidden" - // CheckDestroy: testAccCheckObjectDestroy(ctx), - CheckDestroy: acctest.CheckDestroyNoop, + CheckDestroy: testAccCheckObjectDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccObjectConfig_directoryBucket(rName), From bb0978b925fb89a6cdff049bdca1c420032ecda2 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 14:02:08 -0500 Subject: [PATCH 131/438] r/aws_s3_object_copy: Remove FIXMEs from acceptance tests. --- internal/service/s3/object_copy_test.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/internal/service/s3/object_copy_test.go b/internal/service/s3/object_copy_test.go index adbfd616752..4c466dfe788 100644 --- a/internal/service/s3/object_copy_test.go +++ b/internal/service/s3/object_copy_test.go @@ -449,9 +449,7 @@ func TestAccS3ObjectCopy_directoryBucket(t *testing.T) { PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - // FIXME "Error running post-test destroy, there may be dangling resources: operation error S3: HeadObject, https response error StatusCode: 403, RequestID: 0033eada6b00018c1826f0b80509eee5684ca4b6, HostID: T7lA2Yxglq, api error Forbidden: Forbidden" - // CheckDestroy: testAccCheckObjectCopyDestroy(ctx), - CheckDestroy: acctest.CheckDestroyNoop, + CheckDestroy: testAccCheckObjectCopyDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccObjectCopyConfig_directoryBucket(rName1, sourceKey, rName2, targetKey), From 49698221a04e2e3ca2e5208bb59e24f3dc23c027 Mon Sep 17 00:00:00 2001 
From: Justin Retzolk <44710313+justinretzolk@users.noreply.github.com> Date: Wed, 13 Dec 2023 13:20:24 -0600 Subject: [PATCH 132/438] Correct Slack notifier for regressions --- .github/workflows/regressions.yml | 33 ++++++++++++++++--------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/.github/workflows/regressions.yml b/.github/workflows/regressions.yml index 1e4b410ce70..578696e5e7f 100644 --- a/.github/workflows/regressions.yml +++ b/.github/workflows/regressions.yml @@ -22,20 +22,21 @@ jobs: channel-id: ${{ secrets.SLACK_CHANNEL }} payload: | { - "blocks": [ - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": ":warning: The following has been labeled as a regression:" - } - }, - { - "type": "section", - "text": { - "type": "mrkdwn", - "text": "<${{ env.EVENT_URL }}|${{ env.EVENT_TITLE }}>" - } - } - ] + "blocks": [ + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ":warning: The following has been labeled as a regression:" + } + }, + { + "type": "section", + "text": { + "type": "mrkdwn", + "text": ${{ toJSON(format('<{0}|{1}>', env.EVENT_URL, env.EVENT_TITLE)) }} + } + } + ] } + \ No newline at end of file From 25f95f97d218d93362f478c929d73f70e7c91688 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 14:23:10 -0500 Subject: [PATCH 133/438] Add CHANGELOG entry. --- .changelog/29470.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/29470.txt diff --git a/.changelog/29470.txt b/.changelog/29470.txt new file mode 100644 index 00000000000..77a2333bc51 --- /dev/null +++ b/.changelog/29470.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_dms_endpoint: Add `elasticsearch_settings.use_new_mapping_type` argument +``` \ No newline at end of file From 9642e5ca27d986ec73abcd3e2ef228aecf70754b Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 14:23:28 -0500 Subject: [PATCH 134/438] r/aws_dms_endpoint: Cosmetics. 
--- internal/service/dms/endpoint.go | 18 ++++-------------- 1 file changed, 4 insertions(+), 14 deletions(-) diff --git a/internal/service/dms/endpoint.go b/internal/service/dms/endpoint.go index 080a20cd395..c150396f45c 100644 --- a/internal/service/dms/endpoint.go +++ b/internal/service/dms/endpoint.go @@ -68,43 +68,33 @@ func ResourceEndpoint() *schema.Resource { "endpoint_uri": { Type: schema.TypeString, Required: true, - // API returns this error with ModifyEndpoint: - // InvalidParameterCombinationException: OpenSearch endpoint cant be modified. ForceNew: true, }, "error_retry_duration": { Type: schema.TypeInt, Optional: true, + ForceNew: true, Default: 300, ValidateFunc: validation.IntAtLeast(0), - // API returns this error with ModifyEndpoint: - // InvalidParameterCombinationException: OpenSearch endpoint cant be modified. - ForceNew: true, }, "full_load_error_percentage": { Type: schema.TypeInt, Optional: true, + ForceNew: true, Default: 10, ValidateFunc: validation.IntBetween(0, 100), - // API returns this error with ModifyEndpoint: - // InvalidParameterCombinationException: OpenSearch endpoint cant be modified. - ForceNew: true, }, "service_access_role_arn": { Type: schema.TypeString, Required: true, + ForceNew: true, ValidateFunc: verify.ValidARN, - // API returns this error with ModifyEndpoint: - // InvalidParameterCombinationException: OpenSearch endpoint cant be modified. - ForceNew: true, }, "use_new_mapping_type": { Type: schema.TypeBool, Optional: true, - Default: false, - // API returns this error with ModifyEndpoint: - // InvalidParameterCombinationException: OpenSearch endpoint cant be modified. 
ForceNew: true, + Default: false, }, }, }, From 4eee24fea2a120c02b949efdf747c0e60c48d15d Mon Sep 17 00:00:00 2001 From: Justin Retzolk <44710313+justinretzolk@users.noreply.github.com> Date: Wed, 13 Dec 2023 13:24:51 -0600 Subject: [PATCH 135/438] remove trailing newline --- .github/workflows/regressions.yml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/regressions.yml b/.github/workflows/regressions.yml index 578696e5e7f..3ac9aa683a5 100644 --- a/.github/workflows/regressions.yml +++ b/.github/workflows/regressions.yml @@ -38,5 +38,4 @@ jobs: } } ] - } - \ No newline at end of file + } \ No newline at end of file From e7e4e83bc837e6f46ea21b6dce55c687dafce789 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 14:28:36 -0500 Subject: [PATCH 136/438] Update 34893.txt --- .changelog/34893.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changelog/34893.txt b/.changelog/34893.txt index 45f357f0418..107a64230ea 100644 --- a/.changelog/34893.txt +++ b/.changelog/34893.txt @@ -1,3 +1,3 @@ ```release-note:bug -provider: Always use the S3 regional endpoint in us-east-1 for S3 directory bucket operations. This fixes `no such host` errors +provider: Always use the S3 regional endpoint in `us-east-1` for S3 directory bucket operations. 
This fixes `no such host` errors ``` From 5703400c4c6a8ea4c22ff8133417cf1595ab0a8a Mon Sep 17 00:00:00 2001 From: Justin Retzolk <44710313+justinretzolk@users.noreply.github.com> Date: Wed, 13 Dec 2023 13:29:52 -0600 Subject: [PATCH 137/438] correct linting errors --- .github/workflows/regressions.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/regressions.yml b/.github/workflows/regressions.yml index 3ac9aa683a5..214c719e3ff 100644 --- a/.github/workflows/regressions.yml +++ b/.github/workflows/regressions.yml @@ -38,4 +38,4 @@ jobs: } } ] - } \ No newline at end of file + } From 4d683666dfc3e1d63c060172170077ef4465ce26 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Wed, 13 Dec 2023 19:37:29 +0000 Subject: [PATCH 138/438] Update CHANGELOG.md for #34914 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bb4ebc68010..f7328e74e2c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,10 +8,12 @@ FEATURES: ENHANCEMENTS: * data-source/aws_cloudwatch_log_group: Add `log_group_class` attribute ([#34812](https://github.com/hashicorp/terraform-provider-aws/issues/34812)) +* data-source/aws_dms_endpoint: Add `postgres_settings` attribute ([#34724](https://github.com/hashicorp/terraform-provider-aws/issues/34724)) * data-source/aws_lb: Add `connection_logs` attribute ([#34864](https://github.com/hashicorp/terraform-provider-aws/issues/34864)) * data-source/aws_lb: Add `dns_record_client_routing_policy` attribute ([#34135](https://github.com/hashicorp/terraform-provider-aws/issues/34135)) * data-source/aws_opensearchserverless_collection: Add `standby_replicas` attribute ([#34677](https://github.com/hashicorp/terraform-provider-aws/issues/34677)) * resource/aws_db_instance: Add support for IBM Db2 databases ([#34834](https://github.com/hashicorp/terraform-provider-aws/issues/34834)) +* resource/aws_dms_endpoint: Add `postgres_settings` configuration block 
([#34724](https://github.com/hashicorp/terraform-provider-aws/issues/34724)) * resource/aws_lb: Add `connection_logs` configuration block ([#34864](https://github.com/hashicorp/terraform-provider-aws/issues/34864)) * resource/aws_lb: Add plan-time validation that exactly one of either `subnets` or `subnet_mapping` is configured ([#33205](https://github.com/hashicorp/terraform-provider-aws/issues/33205)) * resource/aws_lb: Allow the number of `subnet_mapping`s for Application Load Balancers to be changed without recreating the resource ([#33205](https://github.com/hashicorp/terraform-provider-aws/issues/33205)) From 184262f363e6c583c333e494e6fe3255262e8d64 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 15:15:39 -0500 Subject: [PATCH 139/438] deadcode: 'diag.FromAttributeError'. --- internal/errs/diag.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/internal/errs/diag.go b/internal/errs/diag.go index 7faf6b596c5..4b3666632d3 100644 --- a/internal/errs/diag.go +++ b/internal/errs/diag.go @@ -68,13 +68,6 @@ func NewWarningDiagnostic(summary, detail string) diag.Diagnostic { } } -func FromAttributeError(path cty.Path, err error) diag.Diagnostic { - return withPath( - NewErrorDiagnostic(err.Error(), ""), - path, - ) -} - func withPath(d diag.Diagnostic, path cty.Path) diag.Diagnostic { d.AttributePath = path return d From 0c8849a1c8b8b1b2fd848d880301f89ed2041d59 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 15:19:06 -0500 Subject: [PATCH 140/438] deadcode: 'flex.ExpandFrameworkListNestedBlock' and 'flex.FlattenFrameworkListNestedBlock'. 
--- internal/framework/flex/list.go | 52 --------------------------------- 1 file changed, 52 deletions(-) diff --git a/internal/framework/flex/list.go b/internal/framework/flex/list.go index 7199673e6eb..5555cf51686 100644 --- a/internal/framework/flex/list.go +++ b/internal/framework/flex/list.go @@ -10,9 +10,6 @@ import ( "github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-framework/types/basetypes" - "github.com/hashicorp/terraform-provider-aws/internal/errs/fwdiag" - fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" - "github.com/hashicorp/terraform-provider-aws/internal/slices" ) func ExpandFrameworkStringList(ctx context.Context, v basetypes.ListValuable) []*string { @@ -86,52 +83,3 @@ func FlattenFrameworkStringValueListLegacy[T ~string](_ context.Context, vs []T) return types.ListValueMust(types.StringType, elems) } - -type FrameworkElementExpanderFunc[T any, U any] func(context.Context, T) U - -func ExpandFrameworkListNestedBlock[T any, U any](ctx context.Context, tfList types.List, f FrameworkElementExpanderFunc[T, U]) []U { - if tfList.IsNull() || tfList.IsUnknown() { - return nil - } - - var data []T - - _ = fwdiag.Must(0, tfList.ElementsAs(ctx, &data, false)) - - return slices.ApplyToAll(data, func(t T) U { - return f(ctx, t) - }) -} - -func ExpandFrameworkListNestedBlockPtr[T any, U any](ctx context.Context, tfList types.List, f FrameworkElementExpanderFunc[T, *U]) *U { - if tfList.IsNull() || tfList.IsUnknown() { - return nil - } - - var data []T - - _ = fwdiag.Must(0, tfList.ElementsAs(ctx, &data, false)) - - if len(data) == 0 { - return nil - } - - return f(ctx, data[0]) -} - -type FrameworkElementFlattenerFunc[T any, U any] func(context.Context, U) T - -func FlattenFrameworkListNestedBlock[T any, U any](ctx context.Context, apiObjects []U, f FrameworkElementFlattenerFunc[T, U]) types.List { - attributeTypes := 
fwtypes.AttributeTypesMust[T](ctx) - elementType := types.ObjectType{AttrTypes: attributeTypes} - - if len(apiObjects) == 0 { - return types.ListNull(elementType) - } - - data := slices.ApplyToAll(apiObjects, func(apiObject U) T { - return f(ctx, apiObject) - }) - - return fwdiag.Must(types.ListValueFrom(ctx, elementType, data)) -} From 40081026a52dcf50a1d67d814bca116429ec6ddc Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 15:23:43 -0500 Subject: [PATCH 141/438] deadcode: service/dynamodb. --- internal/service/dynamodb/table_item.go | 30 ------------------------- 1 file changed, 30 deletions(-) diff --git a/internal/service/dynamodb/table_item.go b/internal/service/dynamodb/table_item.go index fae285cb8ee..344807ef2d9 100644 --- a/internal/service/dynamodb/table_item.go +++ b/internal/service/dynamodb/table_item.go @@ -8,7 +8,6 @@ import ( "fmt" "log" "reflect" - "regexp" "strings" "github.com/aws/aws-sdk-go/aws" @@ -270,35 +269,6 @@ func FindTableItem(ctx context.Context, conn *dynamodb.DynamoDB, tableName strin return out, nil } -func BuildExpressionAttributeNames(attrs map[string]*dynamodb.AttributeValue) map[string]*string { - names := map[string]*string{} - - for key := range attrs { - names["#a_"+cleanKeyName(key)] = aws.String(key) - } - - log.Printf("[DEBUG] ExpressionAttributeNames: %+v", names) - return names -} - -func cleanKeyName(key string) string { - reg, err := regexp.Compile("[A-Za-z^]+") // suspect regexp - if err != nil { - log.Printf("[ERROR] clean keyname errored %v", err) - } - return reg.ReplaceAllString(key, "") -} - -func BuildProjectionExpression(attrs map[string]*dynamodb.AttributeValue) *string { - keys := []string{} - - for key := range attrs { - keys = append(keys, cleanKeyName(key)) - } - log.Printf("[DEBUG] ProjectionExpressions: %+v", strings.Join(keys, ", #a_")) - return aws.String("#a_" + strings.Join(keys, ", #a_")) -} - func buildTableItemID(tableName string, hashKey string, rangeKey string, attrs 
map[string]*dynamodb.AttributeValue) string { id := []string{tableName, hashKey} From a0f8de1dd4788cb4a7fd75e27b91f7d0d55fd956 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 15:27:31 -0500 Subject: [PATCH 142/438] deadcode: service/ec2. --- internal/service/ec2/status.go | 32 ------------------------------- internal/service/ec2/wait.go | 35 ---------------------------------- 2 files changed, 67 deletions(-) diff --git a/internal/service/ec2/status.go b/internal/service/ec2/status.go index be8ece11bb4..67f40990dc7 100644 --- a/internal/service/ec2/status.go +++ b/internal/service/ec2/status.go @@ -960,38 +960,6 @@ func StatusVolumeModificationState(ctx context.Context, conn *ec2.EC2, id string } } -func StatusVPCState(ctx context.Context, conn *ec2.EC2, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - output, err := FindVPCByID(ctx, conn, id) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return output, aws.StringValue(output.State), nil - } -} - -func StatusVPCAttributeValue(ctx context.Context, conn *ec2.EC2, id string, attribute string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - attributeValue, err := FindVPCAttribute(ctx, conn, id, attribute) - - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return attributeValue, strconv.FormatBool(attributeValue), nil - } -} - func StatusVPCCIDRBlockAssociationState(ctx context.Context, conn *ec2.EC2, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { output, _, err := FindVPCCIDRBlockAssociationByID(ctx, conn, id) diff --git a/internal/service/ec2/wait.go b/internal/service/ec2/wait.go index bea1573b898..8319a984981 100644 --- a/internal/service/ec2/wait.go +++ b/internal/service/ec2/wait.go @@ -1667,41 +1667,6 @@ const ( vpcDeletedTimeout = 5 * time.Minute ) -func WaitVPCCreated(ctx 
context.Context, conn *ec2.EC2, id string) (*ec2.Vpc, error) { - stateConf := &retry.StateChangeConf{ - Pending: []string{ec2.VpcStatePending}, - Target: []string{ec2.VpcStateAvailable}, - Refresh: StatusVPCState(ctx, conn, id), - Timeout: vpcCreatedTimeout, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*ec2.Vpc); ok { - return output, err - } - - return nil, err -} - -func WaitVPCAttributeUpdated(ctx context.Context, conn *ec2.EC2, vpcID string, attribute string, expectedValue bool) (*ec2.Vpc, error) { - stateConf := &retry.StateChangeConf{ - Target: []string{strconv.FormatBool(expectedValue)}, - Refresh: StatusVPCAttributeValue(ctx, conn, vpcID, attribute), - Timeout: ec2PropagationTimeout, - Delay: 10 * time.Second, - MinTimeout: 3 * time.Second, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - - if output, ok := outputRaw.(*ec2.Vpc); ok { - return output, err - } - - return nil, err -} - func WaitVPCCIDRBlockAssociationCreated(ctx context.Context, conn *ec2.EC2, id string, timeout time.Duration) (*ec2.VpcCidrBlockState, error) { stateConf := &retry.StateChangeConf{ Pending: []string{ec2.VpcCidrBlockStateCodeAssociating, ec2.VpcCidrBlockStateCodeDisassociated, ec2.VpcCidrBlockStateCodeFailing}, From 9826ee1b7f70200555a62725152d999045b51c52 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 15:29:45 -0500 Subject: [PATCH 143/438] deadcode: service/elasticache. 
--- .../service/elasticache/parameter_group.go | 14 ----------- .../elasticache/parameter_group_test.go | 25 ------------------- 2 files changed, 39 deletions(-) diff --git a/internal/service/elasticache/parameter_group.go b/internal/service/elasticache/parameter_group.go index a8423f2c68a..9fe188ece78 100644 --- a/internal/service/elasticache/parameter_group.go +++ b/internal/service/elasticache/parameter_group.go @@ -409,20 +409,6 @@ func FlattenParameters(list []*elasticache.Parameter) []map[string]interface{} { return result } -// Takes the result of flatmap.Expand for an array of parameters and -// returns Parameter API compatible objects -func ExpandParameters(configured []interface{}) []*elasticache.ParameterNameValue { - parameters := make([]*elasticache.ParameterNameValue, len(configured)) - - // Loop over our configured parameters and create - // an array of aws-sdk-go compatible objects - for i, pRaw := range configured { - parameters[i] = expandParameter(pRaw.(map[string]interface{})) - } - - return parameters -} - func expandParameter(param map[string]interface{}) *elasticache.ParameterNameValue { return &elasticache.ParameterNameValue{ ParameterName: aws.String(param["name"].(string)), diff --git a/internal/service/elasticache/parameter_group_test.go b/internal/service/elasticache/parameter_group_test.go index b4b0e93762c..4e6cf8069ee 100644 --- a/internal/service/elasticache/parameter_group_test.go +++ b/internal/service/elasticache/parameter_group_test.go @@ -620,31 +620,6 @@ func TestFlattenParameters(t *testing.T) { } } -func TestExpandParameters(t *testing.T) { - t.Parallel() - - expanded := []interface{}{ - map[string]interface{}{ - "name": "activerehashing", - "value": "yes", - "apply_method": "immediate", - }, - } - parameters := tfelasticache.ExpandParameters(expanded) - - expected := &elasticache.ParameterNameValue{ - ParameterName: aws.String("activerehashing"), - ParameterValue: aws.String("yes"), - } - - if 
!reflect.DeepEqual(parameters[0], expected) { - t.Fatalf( - "Got:\n\n%#v\n\nExpected:\n\n%#v\n", - parameters[0], - expected) - } -} - func TestParameterChanges(t *testing.T) { t.Parallel() From b7d0d8ae975cccca7ca471b183ed446c894f0fb2 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 15:32:56 -0500 Subject: [PATCH 144/438] deadcode: service/iam. --- internal/service/iam/arn.go | 43 ---------------- internal/service/iam/arn_test.go | 84 -------------------------------- 2 files changed, 127 deletions(-) delete mode 100644 internal/service/iam/arn.go delete mode 100644 internal/service/iam/arn_test.go diff --git a/internal/service/iam/arn.go b/internal/service/iam/arn.go deleted file mode 100644 index 1abf14baf02..00000000000 --- a/internal/service/iam/arn.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package iam - -import ( - "fmt" - "strings" - - "github.com/aws/aws-sdk-go/aws/arn" -) - -const ( - ARNSeparator = "/" - ARNService = "iam" - - InstanceProfileResourcePrefix = "instance-profile" -) - -// InstanceProfileARNToName converts Amazon Resource Name (ARN) to Name. 
-func InstanceProfileARNToName(inputARN string) (string, error) { - parsedARN, err := arn.Parse(inputARN) - - if err != nil { - return "", fmt.Errorf("parsing ARN (%s): %w", inputARN, err) - } - - if actual, expected := parsedARN.Service, ARNService; actual != expected { - return "", fmt.Errorf("expected service %s in ARN (%s), got: %s", expected, inputARN, actual) - } - - resourceParts := strings.Split(parsedARN.Resource, ARNSeparator) - - if actual, expected := len(resourceParts), 2; actual < expected { - return "", fmt.Errorf("expected at least %d resource parts in ARN (%s), got: %d", expected, inputARN, actual) - } - - if actual, expected := resourceParts[0], InstanceProfileResourcePrefix; actual != expected { - return "", fmt.Errorf("expected resource prefix %s in ARN (%s), got: %s", expected, inputARN, actual) - } - - return resourceParts[len(resourceParts)-1], nil -} diff --git a/internal/service/iam/arn_test.go b/internal/service/iam/arn_test.go deleted file mode 100644 index ba07c02b7c2..00000000000 --- a/internal/service/iam/arn_test.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package iam_test - -import ( - "regexp" - "testing" - - "github.com/YakDriver/regexache" - tfiam "github.com/hashicorp/terraform-provider-aws/internal/service/iam" -) - -func TestInstanceProfileARNToName(t *testing.T) { - t.Parallel() - - testCases := []struct { - TestName string - InputARN string - ExpectedError *regexp.Regexp - ExpectedName string - }{ - { - TestName: "empty ARN", - InputARN: "", - ExpectedError: regexache.MustCompile(`parsing ARN`), - }, - { - TestName: "unparsable ARN", - InputARN: "test", - ExpectedError: regexache.MustCompile(`parsing ARN`), - }, - { - TestName: "invalid ARN service", - InputARN: "arn:aws:ec2:us-east-1:123456789012:instance/i-12345678", //lintignore:AWSAT003,AWSAT005 - ExpectedError: regexache.MustCompile(`expected service iam`), - }, - { - TestName: "invalid ARN resource parts", - InputARN: "arn:aws:iam:us-east-1:123456789012:name", //lintignore:AWSAT003,AWSAT005 - ExpectedError: regexache.MustCompile(`expected at least 2 resource parts`), - }, - { - TestName: "invalid ARN resource prefix", - InputARN: "arn:aws:iam:us-east-1:123456789012:role/name", //lintignore:AWSAT003,AWSAT005 - ExpectedError: regexache.MustCompile(`expected resource prefix instance-profile`), - }, - { - TestName: "valid ARN", - InputARN: "arn:aws:iam:us-east-1:123456789012:instance-profile/name", //lintignore:AWSAT003,AWSAT005 - ExpectedName: "name", - }, - { - TestName: "valid ARN with multiple parts", - InputARN: "arn:aws:iam:us-east-1:123456789012:instance-profile/path/name", //lintignore:AWSAT003,AWSAT005 - ExpectedName: "name", - }, - } - - for _, testCase := range testCases { - testCase := testCase - t.Run(testCase.TestName, func(t *testing.T) { - t.Parallel() - - got, err := tfiam.InstanceProfileARNToName(testCase.InputARN) - - if err == nil && testCase.ExpectedError != nil { - t.Fatalf("expected error %s, got no error", testCase.ExpectedError.String()) - } - - if err != nil && testCase.ExpectedError == nil 
{ - t.Fatalf("got unexpected error: %s", err) - } - - if err != nil && !testCase.ExpectedError.MatchString(err.Error()) { - t.Fatalf("expected error %s, got: %s", testCase.ExpectedError.String(), err) - } - - if got != testCase.ExpectedName { - t.Errorf("got %s, expected %s", got, testCase.ExpectedName) - } - }) - } -} From e14409e52d0791e503ee873728525886e89d5765 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 15:33:12 -0500 Subject: [PATCH 145/438] deadcode: service/kinesis. --- internal/service/kinesis/flex.go | 19 --------------- internal/service/kinesis/flex_test.go | 34 --------------------------- 2 files changed, 53 deletions(-) delete mode 100644 internal/service/kinesis/flex.go delete mode 100644 internal/service/kinesis/flex_test.go diff --git a/internal/service/kinesis/flex.go b/internal/service/kinesis/flex.go deleted file mode 100644 index c6a83295f5b..00000000000 --- a/internal/service/kinesis/flex.go +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package kinesis - -import ( - "github.com/aws/aws-sdk-go/service/kinesis" -) - -func FlattenShardLevelMetrics(list []*kinesis.EnhancedMetrics) []string { - if len(list) == 0 { - return []string{} - } - strs := make([]string, 0, len(list[0].ShardLevelMetrics)) - for _, s := range list[0].ShardLevelMetrics { - strs = append(strs, *s) - } - return strs -} diff --git a/internal/service/kinesis/flex_test.go b/internal/service/kinesis/flex_test.go deleted file mode 100644 index f340759d2e1..00000000000 --- a/internal/service/kinesis/flex_test.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package kinesis - -import ( - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/kinesis" -) - -func TestFlattenShardLevelMetrics(t *testing.T) { - t.Parallel() - - expanded := []*kinesis.EnhancedMetrics{ - { - ShardLevelMetrics: []*string{ - aws.String("IncomingBytes"), - aws.String("IncomingRecords"), - }, - }, - } - result := FlattenShardLevelMetrics(expanded) - if len(result) != 2 { - t.Fatalf("expected result had %d elements, but got %d", 2, len(result)) - } - if result[0] != "IncomingBytes" { - t.Fatalf("expected element 0 to be IncomingBytes, but was %s", result[0]) - } - if result[1] != "IncomingRecords" { - t.Fatalf("expected element 0 to be IncomingRecords, but was %s", result[1]) - } -} From e94567c5d5804b572b876902ae708d5443e2d559 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 15:34:31 -0500 Subject: [PATCH 146/438] 'ListApplicationsPages' -> 'listApplicationsPages'. --- internal/service/kinesisanalytics/list.go | 2 +- internal/service/kinesisanalytics/sweep.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/kinesisanalytics/list.go b/internal/service/kinesisanalytics/list.go index 43d8984be93..4708493836f 100644 --- a/internal/service/kinesisanalytics/list.go +++ b/internal/service/kinesisanalytics/list.go @@ -12,7 +12,7 @@ import ( // Custom Kinesisanalytics listing functions using similar formatting as other service generated code. 
-func ListApplicationsPages(ctx context.Context, conn *kinesisanalytics.KinesisAnalytics, input *kinesisanalytics.ListApplicationsInput, fn func(*kinesisanalytics.ListApplicationsOutput, bool) bool) error { +func listApplicationsPages(ctx context.Context, conn *kinesisanalytics.KinesisAnalytics, input *kinesisanalytics.ListApplicationsInput, fn func(*kinesisanalytics.ListApplicationsOutput, bool) bool) error { for { output, err := conn.ListApplicationsWithContext(ctx, input) if err != nil { diff --git a/internal/service/kinesisanalytics/sweep.go b/internal/service/kinesisanalytics/sweep.go index 6b63e1b08a8..66359506afb 100644 --- a/internal/service/kinesisanalytics/sweep.go +++ b/internal/service/kinesisanalytics/sweep.go @@ -36,7 +36,7 @@ func sweepApplications(region string) error { var sweeperErrs *multierror.Error input := &kinesisanalytics.ListApplicationsInput{} - err = ListApplicationsPages(ctx, conn, input, func(page *kinesisanalytics.ListApplicationsOutput, lastPage bool) bool { + err = listApplicationsPages(ctx, conn, input, func(page *kinesisanalytics.ListApplicationsOutput, lastPage bool) bool { if page == nil { return !lastPage } From 4130694f85ea67dda7e33d1eafc04c51dfa1f960 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 15:35:49 -0500 Subject: [PATCH 147/438] deadcode: service/lakeformation. 
--- internal/service/lakeformation/strings.go | 11 ---- .../service/lakeformation/strings_test.go | 60 ------------------- 2 files changed, 71 deletions(-) diff --git a/internal/service/lakeformation/strings.go b/internal/service/lakeformation/strings.go index ece0ae79a5b..2dc826bdf37 100644 --- a/internal/service/lakeformation/strings.go +++ b/internal/service/lakeformation/strings.go @@ -23,14 +23,3 @@ func StringSlicesEqualIgnoreOrder(s1, s2 []*string) bool { return reflect.DeepEqual(v1, v2) } - -func StringSlicesEqual(s1, s2 []*string) bool { - if len(s1) != len(s2) { - return false - } - - v1 := aws.StringValueSlice(s1) - v2 := aws.StringValueSlice(s2) - - return reflect.DeepEqual(v1, v2) -} diff --git a/internal/service/lakeformation/strings_test.go b/internal/service/lakeformation/strings_test.go index a864d14f2f0..f9f890a4739 100644 --- a/internal/service/lakeformation/strings_test.go +++ b/internal/service/lakeformation/strings_test.go @@ -65,63 +65,3 @@ func TestStringSlicesEqualIgnoreOrder(t *testing.T) { } } } - -func TestStringSlicesEqual(t *testing.T) { - t.Parallel() - - equal := []interface{}{ - []interface{}{ - []string{"a", "b", "c"}, - []string{"a", "b", "c"}, - }, - []interface{}{ - []string{"b", "a", "c"}, - []string{"b", "a", "c"}, - }, - []interface{}{ - []string{"apple", "carrot", "tomato"}, - []string{"apple", "carrot", "tomato"}, - }, - []interface{}{ - []string{"Application", "Barrier", "Chilly", "Donut"}, - []string{"Application", "Barrier", "Chilly", "Donut"}, - }, - []interface{}{ - []string{}, - []string{}, - }, - } - for _, v := range equal { - if !tflakeformation.StringSlicesEqual(aws.StringSlice(v.([]interface{})[0].([]string)), aws.StringSlice(v.([]interface{})[1].([]string))) { - t.Fatalf("%v should be equal: %v", v.([]interface{})[0].([]string), v.([]interface{})[1].([]string)) - } - } - - notEqual := []interface{}{ - []interface{}{ - []string{"a", "b", "c"}, - []string{"a", "b"}, - }, - []interface{}{ - []string{"a", "b", "c"}, 
- []string{"b", "a", "c"}, - }, - []interface{}{ - []string{"apple", "carrot", "tomato"}, - []string{"apple", "carrot", "tomato", "zucchini"}, - }, - []interface{}{ - []string{"Application", "Barrier", "Chilly", "Donut"}, - []string{"Application", "Barrier", "Chilly", "Donuts"}, - }, - []interface{}{ - []string{}, - []string{"Application", "Barrier", "Chilly", "Donuts"}, - }, - } - for _, v := range notEqual { - if tflakeformation.StringSlicesEqual(aws.StringSlice(v.([]interface{})[0].([]string)), aws.StringSlice(v.([]interface{})[1].([]string))) { - t.Fatalf("%v should not be equal: %v", v.([]interface{})[0].([]string), v.([]interface{})[1].([]string)) - } - } -} From 41f0158da6f74374636bc059e32001728f0dd13b Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 15:36:19 -0500 Subject: [PATCH 148/438] deadcode: service/lambda. --- internal/service/lambda/permission.go | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/internal/service/lambda/permission.go b/internal/service/lambda/permission.go index d3383355adc..410a48b1728 100644 --- a/internal/service/lambda/permission.go +++ b/internal/service/lambda/permission.go @@ -340,21 +340,6 @@ func FindPolicyStatementByTwoPartKey(ctx context.Context, conn *lambda.Lambda, f } } -func FindPolicyStatementByID(policy *Policy, id string) (*PolicyStatement, error) { - log.Printf("[DEBUG] Received %d statements in Lambda policy: %s", len(policy.Statement), policy.Statement) - for _, statement := range policy.Statement { - if statement.Sid == id { - return &statement, nil - } - } - - return nil, &retry.NotFoundError{ - LastRequest: id, - LastResponse: policy, - Message: fmt.Sprintf("Failed to find statement %q in Lambda policy:\n%s", id, policy.Statement), - } -} - func GetQualifierFromAliasOrVersionARN(arn string) (string, error) { matches := regexache.MustCompile(functionRegexp).FindStringSubmatch(arn) if len(matches) < 8 || matches[7] == "" { From 267b7046b7236b5b169b0e108c4b39bcf350f7bc Mon Sep 17 
00:00:00 2001 From: changelogbot Date: Wed, 13 Dec 2023 20:44:41 +0000 Subject: [PATCH 149/438] Update CHANGELOG.md for #34893 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f7328e74e2c..52e14127080 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ ENHANCEMENTS: * data-source/aws_lb: Add `dns_record_client_routing_policy` attribute ([#34135](https://github.com/hashicorp/terraform-provider-aws/issues/34135)) * data-source/aws_opensearchserverless_collection: Add `standby_replicas` attribute ([#34677](https://github.com/hashicorp/terraform-provider-aws/issues/34677)) * resource/aws_db_instance: Add support for IBM Db2 databases ([#34834](https://github.com/hashicorp/terraform-provider-aws/issues/34834)) +* resource/aws_dms_endpoint: Add `elasticsearch_settings.use_new_mapping_type` argument ([#29470](https://github.com/hashicorp/terraform-provider-aws/issues/29470)) * resource/aws_dms_endpoint: Add `postgres_settings` configuration block ([#34724](https://github.com/hashicorp/terraform-provider-aws/issues/34724)) * resource/aws_lb: Add `connection_logs` configuration block ([#34864](https://github.com/hashicorp/terraform-provider-aws/issues/34864)) * resource/aws_lb: Add plan-time validation that exactly one of either `subnets` or `subnet_mapping` is configured ([#33205](https://github.com/hashicorp/terraform-provider-aws/issues/33205)) @@ -24,6 +25,7 @@ ENHANCEMENTS: BUG FIXES: * data-source/aws_ecr_pull_through_cache_rule: Fix plan time validation for `ecr_repository_prefix` ([#34716](https://github.com/hashicorp/terraform-provider-aws/issues/34716)) +* provider: Always use the S3 regional endpoint in `us-east-1` for S3 directory bucket operations. 
This fixes `no such host` errors ([#34893](https://github.com/hashicorp/terraform-provider-aws/issues/34893)) * resource/aws_appmesh_virtual_node: Remove limit of 50 `backend`s per virtual node ([#34774](https://github.com/hashicorp/terraform-provider-aws/issues/34774)) * resource/aws_cloudwatch_log_group: Fix `invalid new value for .skip_destroy: was cty.False, but now null` errors ([#30354](https://github.com/hashicorp/terraform-provider-aws/issues/30354)) * resource/aws_cloudwatch_log_group: Remove default value (`STANDARD`) for `log_group_class` argument and mark as Computed. This fixes `InvalidParameterException: Only Standard log class is supported` errors in AWS Regions other than AWS Commercial ([#34812](https://github.com/hashicorp/terraform-provider-aws/issues/34812)) From cdc755371c5dcb9e6cfde27b1995a7112544bf89 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Wed, 13 Dec 2023 15:52:41 -0500 Subject: [PATCH 150/438] r/aws_ssoadmin_trusted_token_issuer: fix comment --- internal/service/ssoadmin/trusted_token_issuer.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/ssoadmin/trusted_token_issuer.go b/internal/service/ssoadmin/trusted_token_issuer.go index a5c29ad69bf..98d5a67198c 100644 --- a/internal/service/ssoadmin/trusted_token_issuer.go +++ b/internal/service/ssoadmin/trusted_token_issuer.go @@ -289,7 +289,7 @@ func (r *resourceTrustedTokenIssuer) Update(ctx context.Context, req resource.Up } } - // updateTags requires both application and instance ARN, so must be called + // updateTags requires both trusted token issuer and instance ARN, so must be called // explicitly rather than with transparent tagging. 
if oldTagsAll, newTagsAll := state.TagsAll, plan.TagsAll; !newTagsAll.Equal(oldTagsAll) { if err := updateTags(ctx, conn, plan.ID.ValueString(), plan.InstanceARN.ValueString(), oldTagsAll, newTagsAll); err != nil { From 6309aa80fae843279ded6d3e9d0b3181157cc842 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Wed, 13 Dec 2023 16:05:35 -0500 Subject: [PATCH 151/438] internal/framework/flex: use ValueFromList for nil slice and map values (#34912) This should prevent panics for list/sets/maps of string/*string types when nil values are returned from the AWS API and custom [List/Set/Map]Of types are used in the underlying Terraform data structures --- internal/framework/flex/auto_flatten.go | 48 ++++++++++++++--- internal/framework/flex/auto_flatten_test.go | 55 +++++++++++++++++++- internal/framework/flex/autoflex_test.go | 10 ++++ 3 files changed, 105 insertions(+), 8 deletions(-) diff --git a/internal/framework/flex/auto_flatten.go b/internal/framework/flex/auto_flatten.go index 3338491d396..36859d08985 100644 --- a/internal/framework/flex/auto_flatten.go +++ b/internal/framework/flex/auto_flatten.go @@ -332,7 +332,13 @@ func (flattener autoFlattener) slice(ctx context.Context, vFrom reflect.Value, t // []string -> types.List(OfString). // if vFrom.IsNil() { - vTo.Set(reflect.ValueOf(types.ListNull(types.StringType))) + to, d := tTo.ValueFromList(ctx, types.ListNull(types.StringType)) + diags.Append(d...) + if diags.HasError() { + return diags + } + + vTo.Set(reflect.ValueOf(to)) return diags } @@ -360,7 +366,13 @@ func (flattener autoFlattener) slice(ctx context.Context, vFrom reflect.Value, t // []string -> types.Set(OfString). // if vFrom.IsNil() { - vTo.Set(reflect.ValueOf(types.SetNull(types.StringType))) + to, d := tTo.ValueFromSet(ctx, types.SetNull(types.StringType)) + diags.Append(d...) 
+ if diags.HasError() { + return diags + } + + vTo.Set(reflect.ValueOf(to)) return diags } @@ -393,7 +405,13 @@ func (flattener autoFlattener) slice(ctx context.Context, vFrom reflect.Value, t // []*string -> types.List(OfString). // if vFrom.IsNil() { - vTo.Set(reflect.ValueOf(types.ListNull(types.StringType))) + to, d := tTo.ValueFromList(ctx, types.ListNull(types.StringType)) + diags.Append(d...) + if diags.HasError() { + return diags + } + + vTo.Set(reflect.ValueOf(to)) return diags } @@ -422,7 +440,13 @@ func (flattener autoFlattener) slice(ctx context.Context, vFrom reflect.Value, t // []string -> types.Set(OfString). // if vFrom.IsNil() { - vTo.Set(reflect.ValueOf(types.SetNull(types.StringType))) + to, d := tTo.ValueFromSet(ctx, types.SetNull(types.StringType)) + diags.Append(d...) + if diags.HasError() { + return diags + } + + vTo.Set(reflect.ValueOf(to)) return diags } @@ -500,7 +524,13 @@ func (flattener autoFlattener) map_(ctx context.Context, vFrom reflect.Value, tT // map[string]string -> types.Map(OfString). // if vFrom.IsNil() { - vTo.Set(reflect.ValueOf(types.MapNull(types.StringType))) + to, d := tTo.ValueFromMap(ctx, types.MapNull(types.StringType)) + diags.Append(d...) + if diags.HasError() { + return diags + } + + vTo.Set(reflect.ValueOf(to)) return diags } @@ -542,7 +572,13 @@ func (flattener autoFlattener) map_(ctx context.Context, vFrom reflect.Value, tT // map[string]*string -> types.Map(OfString). // if vFrom.IsNil() { - vTo.Set(reflect.ValueOf(types.MapNull(types.StringType))) + to, d := tTo.ValueFromMap(ctx, types.MapNull(types.StringType)) + diags.Append(d...) 
+ if diags.HasError() { + return diags + } + + vTo.Set(reflect.ValueOf(to)) return diags } diff --git a/internal/framework/flex/auto_flatten_test.go b/internal/framework/flex/auto_flatten_test.go index 271a2cd0200..a5a2b9f424b 100644 --- a/internal/framework/flex/auto_flatten_test.go +++ b/internal/framework/flex/auto_flatten_test.go @@ -166,7 +166,7 @@ func TestFlatten(t *testing.T) { }, }, { - TestName: "zero value slice/map of primtive primtive types Source and List/Set/Map of primtive types Target", + TestName: "zero value slice/map of primtive types Source and List/Set/Map of primtive types Target", Source: &TestFlexAWS05{}, Target: &TestFlexTF04{}, WantTarget: &TestFlexTF04{ @@ -179,7 +179,7 @@ func TestFlatten(t *testing.T) { }, }, { - TestName: "slice/map of primtive primtive types Source and List/Set/Map of primtive types Target", + TestName: "slice/map of primtive types Source and List/Set/Map of primtive types Target", Source: &TestFlexAWS05{ Field1: []string{"a", "b"}, Field2: aws.StringSlice([]string{"a", "b"}), @@ -216,6 +216,57 @@ func TestFlatten(t *testing.T) { }), }, }, + { + TestName: "zero value slice/map of string type Source and List/Set/Map of string types Target", + Source: &TestFlexAWS05{}, + Target: &TestFlexTF18{}, + WantTarget: &TestFlexTF18{ + Field1: fwtypes.NewListValueOfNull[types.String](ctx), + Field2: fwtypes.NewListValueOfNull[types.String](ctx), + Field3: fwtypes.NewSetValueOfNull[types.String](ctx), + Field4: fwtypes.NewSetValueOfNull[types.String](ctx), + Field5: fwtypes.NewMapValueOfNull[types.String](ctx), + Field6: fwtypes.NewMapValueOfNull[types.String](ctx), + }, + }, + { + TestName: "slice/map of string types Source and List/Set/Map of string types Target", + Source: &TestFlexAWS05{ + Field1: []string{"a", "b"}, + Field2: aws.StringSlice([]string{"a", "b"}), + Field3: []string{"a", "b"}, + Field4: aws.StringSlice([]string{"a", "b"}), + Field5: map[string]string{"A": "a", "B": "b"}, + Field6: 
aws.StringMap(map[string]string{"A": "a", "B": "b"}), + }, + Target: &TestFlexTF18{}, + WantTarget: &TestFlexTF18{ + Field1: fwtypes.NewListValueOfMust[types.String](ctx, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + Field2: fwtypes.NewListValueOfMust[types.String](ctx, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + Field3: fwtypes.NewSetValueOfMust[types.String](ctx, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + Field4: fwtypes.NewSetValueOfMust[types.String](ctx, []attr.Value{ + types.StringValue("a"), + types.StringValue("b"), + }), + Field5: fwtypes.NewMapValueOf[types.String](ctx, map[string]basetypes.StringValue{ + "A": types.StringValue("a"), + "B": types.StringValue("b"), + }), + Field6: fwtypes.NewMapValueOf[types.String](ctx, map[string]basetypes.StringValue{ + "A": types.StringValue("a"), + "B": types.StringValue("b"), + }), + }, + }, { TestName: "plural ordinary field names", Source: &TestFlexAWS10{ diff --git a/internal/framework/flex/autoflex_test.go b/internal/framework/flex/autoflex_test.go index 4de5b2d2469..0c86261d0d6 100644 --- a/internal/framework/flex/autoflex_test.go +++ b/internal/framework/flex/autoflex_test.go @@ -287,3 +287,13 @@ type TestFlexPluralityAWS01 struct { type TestFlexTF17 struct { Field1 fwtypes.ARN `tfsdk:"field1"` } + +// List/Set/Map of string types. +type TestFlexTF18 struct { + Field1 fwtypes.ListValueOf[types.String] `tfsdk:"field1"` + Field2 fwtypes.ListValueOf[types.String] `tfsdk:"field2"` + Field3 fwtypes.SetValueOf[types.String] `tfsdk:"field3"` + Field4 fwtypes.SetValueOf[types.String] `tfsdk:"field4"` + Field5 fwtypes.MapValueOf[types.String] `tfsdk:"field5"` + Field6 fwtypes.MapValueOf[types.String] `tfsdk:"field6"` +} From 09faf3c89cc87e355e113c1b3edb7a19890c1365 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 16:12:29 -0500 Subject: [PATCH 152/438] Add CHANGELOG entry. 
--- .changelog/33731.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/33731.txt diff --git a/.changelog/33731.txt b/.changelog/33731.txt new file mode 100644 index 00000000000..67f032e3959 --- /dev/null +++ b/.changelog/33731.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_dms_event_subscription: `source_ids` and `source_type` are Required +``` \ No newline at end of file From a64589750fc95e59c0a1118ee5bc6ef9994d6d82 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 16:20:46 -0500 Subject: [PATCH 153/438] r/aws_dms_event_subscription: Tidy up Create. --- internal/service/dms/event_subscription.go | 27 ++++++++-------------- 1 file changed, 10 insertions(+), 17 deletions(-) diff --git a/internal/service/dms/event_subscription.go b/internal/service/dms/event_subscription.go index 994759fdf34..7609bc78557 100644 --- a/internal/service/dms/event_subscription.go +++ b/internal/service/dms/event_subscription.go @@ -33,6 +33,7 @@ func ResourceEventSubscription() *schema.Resource { ReadWithoutTimeout: resourceEventSubscriptionRead, UpdateWithoutTimeout: resourceEventSubscriptionUpdate, DeleteWithoutTimeout: resourceEventSubscriptionDelete, + Timeouts: &schema.ResourceTimeout{ Create: schema.DefaultTimeout(10 * time.Minute), Delete: schema.DefaultTimeout(10 * time.Minute), @@ -56,7 +57,6 @@ func ResourceEventSubscription() *schema.Resource { "event_categories": { Type: schema.TypeSet, Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, Required: true, }, "name": { @@ -73,14 +73,12 @@ func ResourceEventSubscription() *schema.Resource { "source_ids": { Type: schema.TypeSet, Elem: &schema.Schema{Type: schema.TypeString}, - Set: schema.HashString, - ForceNew: true, Required: true, + ForceNew: true, }, "source_type": { Type: schema.TypeString, Required: true, - // The API suppors modification but doing so loses all source_ids ForceNew: true, ValidateFunc: validation.StringInSlice([]string{ 
"replication-instance", @@ -99,29 +97,24 @@ func resourceEventSubscriptionCreate(ctx context.Context, d *schema.ResourceData var diags diag.Diagnostics conn := meta.(*conns.AWSClient).DMSConn(ctx) - request := &dms.CreateEventSubscriptionInput{ + name := d.Get("name").(string) + input := &dms.CreateEventSubscriptionInput{ Enabled: aws.Bool(d.Get("enabled").(bool)), + EventCategories: flex.ExpandStringSet(d.Get("event_categories").(*schema.Set)), SnsTopicArn: aws.String(d.Get("sns_topic_arn").(string)), - SubscriptionName: aws.String(d.Get("name").(string)), + SourceIds: flex.ExpandStringSet(d.Get("source_ids").(*schema.Set)), SourceType: aws.String(d.Get("source_type").(string)), + SubscriptionName: aws.String(name), Tags: getTagsIn(ctx), } - if v, ok := d.GetOk("event_categories"); ok { - request.EventCategories = flex.ExpandStringSet(v.(*schema.Set)) - } - - if v, ok := d.GetOk("source_ids"); ok { - request.SourceIds = flex.ExpandStringSet(v.(*schema.Set)) - } - - _, err := conn.CreateEventSubscriptionWithContext(ctx, request) + _, err := conn.CreateEventSubscriptionWithContext(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating DMS Event Subscription (%s): %s", d.Get("name").(string), err) + return sdkdiag.AppendErrorf(diags, "creating DMS Event Subscription (%s): %s", name, err) } - d.SetId(d.Get("name").(string)) + d.SetId(name) stateConf := &retry.StateChangeConf{ Pending: []string{"creating", "modifying"}, From c50dbf8b9a505c3ea3fd6a55de7466367c77fda8 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 16:26:50 -0500 Subject: [PATCH 154/438] r/aws_dms_event_subscription: Tidy up Update. 
--- internal/service/dms/event_subscription.go | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/internal/service/dms/event_subscription.go b/internal/service/dms/event_subscription.go index 7609bc78557..9755b5e7067 100644 --- a/internal/service/dms/event_subscription.go +++ b/internal/service/dms/event_subscription.go @@ -184,19 +184,16 @@ func resourceEventSubscriptionUpdate(ctx context.Context, d *schema.ResourceData var diags diag.Diagnostics conn := meta.(*conns.AWSClient).DMSConn(ctx) - if d.HasChanges("enabled", "event_categories", "sns_topic_arn", "source_type") { - request := &dms.ModifyEventSubscriptionInput{ + if d.HasChangesExcept("tags", "tags_all") { + input := &dms.ModifyEventSubscriptionInput{ Enabled: aws.Bool(d.Get("enabled").(bool)), + EventCategories: flex.ExpandStringSet(d.Get("event_categories").(*schema.Set)), SnsTopicArn: aws.String(d.Get("sns_topic_arn").(string)), - SubscriptionName: aws.String(d.Get("name").(string)), SourceType: aws.String(d.Get("source_type").(string)), + SubscriptionName: aws.String(d.Id()), } - if v, ok := d.GetOk("event_categories"); ok { - request.EventCategories = flex.ExpandStringSet(v.(*schema.Set)) - } - - _, err := conn.ModifyEventSubscriptionWithContext(ctx, request) + _, err := conn.ModifyEventSubscriptionWithContext(ctx, input) if err != nil { return sdkdiag.AppendErrorf(diags, "updating DMS Event Subscription (%s): %s", d.Id(), err) From fd1d489b444cf4d1c62d3944b76071486a0bdebb Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 16:28:04 -0500 Subject: [PATCH 155/438] r/aws_dms_event_subscription: Tidy up Delete. 
--- internal/service/dms/event_subscription.go | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/internal/service/dms/event_subscription.go b/internal/service/dms/event_subscription.go index 9755b5e7067..b53e8dc2c7d 100644 --- a/internal/service/dms/event_subscription.go +++ b/internal/service/dms/event_subscription.go @@ -196,7 +196,7 @@ func resourceEventSubscriptionUpdate(ctx context.Context, d *schema.ResourceData _, err := conn.ModifyEventSubscriptionWithContext(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating DMS Event Subscription (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "modifying DMS Event Subscription (%s): %s", d.Id(), err) } stateConf := &retry.StateChangeConf{ @@ -221,11 +221,10 @@ func resourceEventSubscriptionDelete(ctx context.Context, d *schema.ResourceData var diags diag.Diagnostics conn := meta.(*conns.AWSClient).DMSConn(ctx) - request := &dms.DeleteEventSubscriptionInput{ + log.Printf("[DEBUG] Deleting DMS Event Subscription: %s", d.Id()) + _, err := conn.DeleteEventSubscriptionWithContext(ctx, &dms.DeleteEventSubscriptionInput{ SubscriptionName: aws.String(d.Id()), - } - - _, err := conn.DeleteEventSubscriptionWithContext(ctx, request) + }) if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { return diags From 1754c48d82a1b4863b4ae01ed1b86f942053b1dd Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 16:35:57 -0500 Subject: [PATCH 156/438] r/aws_dms_event_subscription: Tidy up Read. 
--- internal/service/dms/event_subscription.go | 77 ++++++++++++++++------ 1 file changed, 57 insertions(+), 20 deletions(-) diff --git a/internal/service/dms/event_subscription.go b/internal/service/dms/event_subscription.go index b53e8dc2c7d..3c28274f3f5 100644 --- a/internal/service/dms/event_subscription.go +++ b/internal/service/dms/event_subscription.go @@ -21,6 +21,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -137,30 +138,18 @@ func resourceEventSubscriptionRead(ctx context.Context, d *schema.ResourceData, var diags diag.Diagnostics conn := meta.(*conns.AWSClient).DMSConn(ctx) - request := &dms.DescribeEventSubscriptionsInput{ - SubscriptionName: aws.String(d.Id()), - } - - response, err := conn.DescribeEventSubscriptionsWithContext(ctx, request) + subscription, err := FindEventSubscriptionByName(ctx, conn, d.Id()) - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { - log.Printf("[WARN] DMS event subscription (%s) not found, removing from state", d.Id()) + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] DMS Event Subscription (%s) not found, removing from state", d.Id()) d.SetId("") return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "reading DMS event subscription: %s", err) - } - - if response == nil || len(response.EventSubscriptionsList) == 0 || response.EventSubscriptionsList[0] == nil { - log.Printf("[WARN] DMS event subscription (%s) not found, removing from state", d.Id()) - d.SetId("") - return diags + return sdkdiag.AppendErrorf(diags, "reading DMS Event Subscription (%s): %s", d.Id(), err) } - subscription := response.EventSubscriptionsList[0] - 
arn := arn.ARN{ Partition: meta.(*conns.AWSClient).Partition, Service: "dms", @@ -169,13 +158,12 @@ func resourceEventSubscriptionRead(ctx context.Context, d *schema.ResourceData, Resource: fmt.Sprintf("es:%s", d.Id()), }.String() d.Set("arn", arn) - d.Set("enabled", subscription.Enabled) + d.Set("event_categories", aws.StringValueSlice(subscription.EventCategoriesList)) + d.Set("name", d.Id()) d.Set("sns_topic_arn", subscription.SnsTopicArn) + d.Set("source_ids", aws.StringValueSlice(subscription.SourceIdsList)) d.Set("source_type", subscription.SourceType) - d.Set("name", d.Id()) - d.Set("event_categories", flex.FlattenStringList(subscription.EventCategoriesList)) - d.Set("source_ids", flex.FlattenStringList(subscription.SourceIdsList)) return diags } @@ -251,6 +239,55 @@ func resourceEventSubscriptionDelete(ctx context.Context, d *schema.ResourceData return diags } +func FindEventSubscriptionByName(ctx context.Context, conn *dms.DatabaseMigrationService, name string) (*dms.EventSubscription, error) { + input := &dms.DescribeEventSubscriptionsInput{ + SubscriptionName: aws.String(name), + } + + return findEventSubscription(ctx, conn, input) +} + +func findEventSubscription(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeEventSubscriptionsInput) (*dms.EventSubscription, error) { + output, err := findEventSubscriptions(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSinglePtrResult(output) +} + +func findEventSubscriptions(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeEventSubscriptionsInput) ([]*dms.EventSubscription, error) { + var output []*dms.EventSubscription + + err := conn.DescribeEventSubscriptionsPagesWithContext(ctx, input, func(page *dms.DescribeEventSubscriptionsOutput, lastPage bool) bool { + if page == nil { + return !lastPage + } + + for _, v := range page.EventSubscriptionsList { + if v != nil { + output = append(output, v) + } + } + + return !lastPage 
+ }) + + if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + return output, nil +} + func resourceEventSubscriptionStateRefreshFunc(ctx context.Context, conn *dms.DatabaseMigrationService, name string) retry.StateRefreshFunc { return func() (interface{}, string, error) { v, err := conn.DescribeEventSubscriptionsWithContext(ctx, &dms.DescribeEventSubscriptionsInput{ From 2790705f01a42fe98e4a8bac38143f3cad9f5e43 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 16:43:12 -0500 Subject: [PATCH 157/438] r/aws_dms_event_subscription: Add waiters. --- internal/service/dms/consts.go | 7 ++ internal/service/dms/event_subscription.go | 111 ++++++++++++--------- 2 files changed, 73 insertions(+), 45 deletions(-) diff --git a/internal/service/dms/consts.go b/internal/service/dms/consts.go index c33690fd770..a3fa4d62582 100644 --- a/internal/service/dms/consts.go +++ b/internal/service/dms/consts.go @@ -174,3 +174,10 @@ func networkType_Values() []string { networkTypeIPv4, } } + +const ( + eventSubscriptionStatusActive = "active" + eventSubscriptionStatusCreating = "creating" + eventSubscriptionStatusDeleting = "deleting" + eventSubscriptionStatusModifying = "modifying" +) diff --git a/internal/service/dms/event_subscription.go b/internal/service/dms/event_subscription.go index 3c28274f3f5..e9226398b07 100644 --- a/internal/service/dms/event_subscription.go +++ b/internal/service/dms/event_subscription.go @@ -117,18 +117,8 @@ func resourceEventSubscriptionCreate(ctx context.Context, d *schema.ResourceData d.SetId(name) - stateConf := &retry.StateChangeConf{ - Pending: []string{"creating", "modifying"}, - Target: []string{"active"}, - Refresh: resourceEventSubscriptionStateRefreshFunc(ctx, conn, d.Id()), - Timeout: d.Timeout(schema.TimeoutCreate), - MinTimeout: 10 * time.Second, - Delay: 10 * time.Second, - } - - _, err = 
stateConf.WaitForStateContext(ctx) - if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for DMS Event Subscription (%s) creation: %s", d.Id(), err) + if _, err := waitEventSubscriptionCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for DMS Event Subscription (%s) create: %s", d.Id(), err) } return append(diags, resourceEventSubscriptionRead(ctx, d, meta)...) @@ -187,18 +177,8 @@ func resourceEventSubscriptionUpdate(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendErrorf(diags, "modifying DMS Event Subscription (%s): %s", d.Id(), err) } - stateConf := &retry.StateChangeConf{ - Pending: []string{"modifying"}, - Target: []string{"active"}, - Refresh: resourceEventSubscriptionStateRefreshFunc(ctx, conn, d.Id()), - Timeout: d.Timeout(schema.TimeoutUpdate), - MinTimeout: 10 * time.Second, - Delay: 10 * time.Second, - } - - _, err = stateConf.WaitForStateContext(ctx) - if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for DMS Event Subscription (%s) modification: %s", d.Id(), err) + if _, err := waitEventSubscriptionUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for DMS Event Subscription (%s) update: %s", d.Id(), err) } } @@ -222,18 +202,8 @@ func resourceEventSubscriptionDelete(ctx context.Context, d *schema.ResourceData return sdkdiag.AppendErrorf(diags, "deleting DMS Event Subscription (%s): %s", d.Id(), err) } - stateConf := &retry.StateChangeConf{ - Pending: []string{"deleting"}, - Target: []string{}, - Refresh: resourceEventSubscriptionStateRefreshFunc(ctx, conn, d.Id()), - Timeout: d.Timeout(schema.TimeoutDelete), - MinTimeout: 10 * time.Second, - Delay: 10 * time.Second, - } - - _, err = stateConf.WaitForStateContext(ctx) - if err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for DMS Event Subscription (%s) deletion: %s", d.Id(), err) + if _, err := 
waitEventSubscriptionDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + return sdkdiag.AppendErrorf(diags, "waiting for DMS Event Subscription (%s) delete: %s", d.Id(), err) } return diags @@ -288,13 +258,11 @@ func findEventSubscriptions(ctx context.Context, conn *dms.DatabaseMigrationServ return output, nil } -func resourceEventSubscriptionStateRefreshFunc(ctx context.Context, conn *dms.DatabaseMigrationService, name string) retry.StateRefreshFunc { +func statusEventSubscription(ctx context.Context, conn *dms.DatabaseMigrationService, name string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - v, err := conn.DescribeEventSubscriptionsWithContext(ctx, &dms.DescribeEventSubscriptionsInput{ - SubscriptionName: aws.String(name), - }) + output, err := FindEventSubscriptionByName(ctx, conn, name) - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + if tfresource.NotFound(err) { return nil, "", nil } @@ -302,10 +270,63 @@ func resourceEventSubscriptionStateRefreshFunc(ctx context.Context, conn *dms.Da return nil, "", err } - if v == nil || len(v.EventSubscriptionsList) == 0 || v.EventSubscriptionsList[0] == nil { - return nil, "", nil - } + return output, aws.StringValue(output.Status), nil + } +} + +func waitEventSubscriptionCreated(ctx context.Context, conn *dms.DatabaseMigrationService, name string, timeout time.Duration) (*dms.EventSubscription, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{eventSubscriptionStatusCreating, eventSubscriptionStatusModifying}, + Target: []string{eventSubscriptionStatusActive}, + Refresh: statusEventSubscription(ctx, conn, name), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 10 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*dms.EventSubscription); ok { + return output, err + } + + return nil, err +} + +func waitEventSubscriptionUpdated(ctx context.Context, conn 
*dms.DatabaseMigrationService, name string, timeout time.Duration) (*dms.EventSubscription, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{eventSubscriptionStatusModifying}, + Target: []string{eventSubscriptionStatusActive}, + Refresh: statusEventSubscription(ctx, conn, name), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 10 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) - return v, aws.StringValue(v.EventSubscriptionsList[0].Status), nil + if output, ok := outputRaw.(*dms.EventSubscription); ok { + return output, err } + + return nil, err +} + +func waitEventSubscriptionDeleted(ctx context.Context, conn *dms.DatabaseMigrationService, name string, timeout time.Duration) (*dms.EventSubscription, error) { + stateConf := &retry.StateChangeConf{ + Pending: []string{eventSubscriptionStatusDeleting}, + Target: []string{}, + Refresh: statusEventSubscription(ctx, conn, name), + Timeout: timeout, + MinTimeout: 10 * time.Second, + Delay: 10 * time.Second, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*dms.EventSubscription); ok { + return output, err + } + + return nil, err } From a8d0d67bd604e174608aac9471b833695acda132 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Wed, 13 Dec 2023 16:48:26 -0500 Subject: [PATCH 158/438] r/aws_dms_event_subscription: Tidy up acceptance tests. 
--- .../service/dms/event_subscription_test.go | 83 ++++--------------- 1 file changed, 16 insertions(+), 67 deletions(-) diff --git a/internal/service/dms/event_subscription_test.go b/internal/service/dms/event_subscription_test.go index e222bd4068a..45f3d0c15c6 100644 --- a/internal/service/dms/event_subscription_test.go +++ b/internal/service/dms/event_subscription_test.go @@ -8,15 +8,14 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go/aws" dms "github.com/aws/aws-sdk-go/service/databasemigrationservice" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" tfdms "github.com/hashicorp/terraform-provider-aws/internal/service/dms" + "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) func TestAccDMSEventSubscription_basic(t *testing.T) { @@ -215,11 +214,9 @@ func testAccCheckEventSubscriptionDestroy(ctx context.Context) resource.TestChec conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) - resp, err := conn.DescribeEventSubscriptionsWithContext(ctx, &dms.DescribeEventSubscriptionsInput{ - SubscriptionName: aws.String(rs.Primary.ID), - }) + _, err := tfdms.FindEventSubscriptionByName(ctx, conn, rs.Primary.ID) - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { + if tfresource.NotFound(err) { continue } @@ -227,78 +224,38 @@ func testAccCheckEventSubscriptionDestroy(ctx context.Context) resource.TestChec return err } - if resp != nil && len(resp.EventSubscriptionsList) > 0 { - return fmt.Errorf("DMS event subscription still exists: %s", rs.Primary.ID) - } + return fmt.Errorf("DMS Event Subscription %s still exists", rs.Primary.ID) } return nil } } -func 
testAccCheckEventSubscriptionExists(ctx context.Context, n string, eventSubscription *dms.EventSubscription) resource.TestCheckFunc { +func testAccCheckEventSubscriptionExists(ctx context.Context, n string, v *dms.EventSubscription) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) - resp, err := conn.DescribeEventSubscriptionsWithContext(ctx, &dms.DescribeEventSubscriptionsInput{ - SubscriptionName: aws.String(rs.Primary.ID), - }) - if err != nil { - return fmt.Errorf("DMS event subscription error: %v", err) - } + output, err := tfdms.FindEventSubscriptionByName(ctx, conn, rs.Primary.ID) - if resp == nil || len(resp.EventSubscriptionsList) == 0 || resp.EventSubscriptionsList[0] == nil { - return fmt.Errorf("DMS event subscription not found") + if err != nil { + return err } - *eventSubscription = *resp.EventSubscriptionsList[0] + *v = *output return nil } } -func testAccEventSubscriptionConfigBase(rName string) string { - return fmt.Sprintf(` -data "aws_availability_zones" "available" { - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} - +func testAccEventSubscriptionConfig_base(rName string) string { + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 2), fmt.Sprintf(` data "aws_partition" "current" {} -resource "aws_vpc" "test" { - cidr_block = "10.1.0.0/16" - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test" { - count = 2 - - availability_zone = data.aws_availability_zones.available.names[count.index] - cidr_block = "10.1.${count.index}.0/24" - vpc_id = aws_vpc.test.id - - tags = { - Name = aws_vpc.test.tags["Name"] - } -} - resource "aws_dms_replication_subnet_group" "test" { replication_subnet_group_description = %[1]q replication_subnet_group_id = 
%[1]q @@ -315,13 +272,11 @@ resource "aws_dms_replication_instance" "test" { resource "aws_sns_topic" "test" { name = %[1]q } -`, rName) +`, rName)) } func testAccEventSubscriptionConfig_enabled(rName string, enabled bool) string { - return acctest.ConfigCompose( - testAccEventSubscriptionConfigBase(rName), - fmt.Sprintf(` + return acctest.ConfigCompose(testAccEventSubscriptionConfig_base(rName), fmt.Sprintf(` resource "aws_dms_event_subscription" "test" { name = %[1]q enabled = %[2]t @@ -334,9 +289,7 @@ resource "aws_dms_event_subscription" "test" { } func testAccEventSubscriptionConfig_categories2(rName string, eventCategory1 string, eventCategory2 string) string { - return acctest.ConfigCompose( - testAccEventSubscriptionConfigBase(rName), - fmt.Sprintf(` + return acctest.ConfigCompose(testAccEventSubscriptionConfig_base(rName), fmt.Sprintf(` resource "aws_dms_event_subscription" "test" { name = %[1]q enabled = false @@ -349,9 +302,7 @@ resource "aws_dms_event_subscription" "test" { } func testAccEventSubscriptionConfig_tags1(rName, tagKey1, tagValue1 string) string { - return acctest.ConfigCompose( - testAccEventSubscriptionConfigBase(rName), - fmt.Sprintf(` + return acctest.ConfigCompose(testAccEventSubscriptionConfig_base(rName), fmt.Sprintf(` resource "aws_dms_event_subscription" "test" { name = %[1]q enabled = true @@ -368,9 +319,7 @@ resource "aws_dms_event_subscription" "test" { } func testAccEventSubscriptionConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return acctest.ConfigCompose( - testAccEventSubscriptionConfigBase(rName), - fmt.Sprintf(` + return acctest.ConfigCompose(testAccEventSubscriptionConfig_base(rName), fmt.Sprintf(` resource "aws_dms_event_subscription" "test" { name = %[1]q enabled = true From 26525e579dd6749897aead68ee7b4f2c1b6b99c0 Mon Sep 17 00:00:00 2001 From: Mayank Hirani Date: Wed, 13 Dec 2023 18:05:06 -0500 Subject: [PATCH 159/438] Fix acceptance test linting and doc issue. 
--- .../service/finspace/kx_scaling_group_test.go | 198 +++++++++--------- .../r/finspace_kx_scaling_group.html.markdown | 5 +- 2 files changed, 102 insertions(+), 101 deletions(-) diff --git a/internal/service/finspace/kx_scaling_group_test.go b/internal/service/finspace/kx_scaling_group_test.go index 3f5b3714a94..28c988f4e2a 100644 --- a/internal/service/finspace/kx_scaling_group_test.go +++ b/internal/service/finspace/kx_scaling_group_test.go @@ -147,127 +147,127 @@ func testAccCheckKxScalingGroupExists(ctx context.Context, name string, KxScalin func testAccKxScalingGroupConfigBase(rName string) string { return fmt.Sprintf(` - data "aws_caller_identity" "current" {} - data "aws_partition" "current" {} +data "aws_caller_identity" "current" {} +data "aws_partition" "current" {} - output "account_id" { - value = data.aws_caller_identity.current.account_id - } +output "account_id" { + value = data.aws_caller_identity.current.account_id +} - resource "aws_kms_key" "test" { - deletion_window_in_days = 7 - } +resource "aws_kms_key" "test" { + deletion_window_in_days = 7 +} - resource "aws_finspace_kx_environment" "test" { - name = %[1]q - kms_key_id = aws_kms_key.test.arn - } +resource "aws_finspace_kx_environment" "test" { + name = %[1]q + kms_key_id = aws_kms_key.test.arn +} - data "aws_iam_policy_document" "key_policy" { - statement { - actions = [ - "kms:Decrypt", - "kms:GenerateDataKey" - ] +data "aws_iam_policy_document" "key_policy" { + statement { + actions = [ + "kms:Decrypt", + "kms:GenerateDataKey" + ] - resources = [ - aws_kms_key.test.arn, - ] + resources = [ + aws_kms_key.test.arn, + ] - principals { - type = "Service" - identifiers = ["finspace.amazonaws.com"] - } - - condition { - test = "ArnLike" - variable = "aws:SourceArn" - values = ["${aws_finspace_kx_environment.test.arn}/*"] - } + principals { + type = "Service" + identifiers = ["finspace.amazonaws.com"] + } - condition { - test = "StringEquals" - variable = "aws:SourceAccount" - values = 
[data.aws_caller_identity.current.account_id] - } - } + condition { + test = "ArnLike" + variable = "aws:SourceArn" + values = ["${aws_finspace_kx_environment.test.arn}/*"] + } - statement { - actions = [ - "kms:*", - ] + condition { + test = "StringEquals" + variable = "aws:SourceAccount" + values = [data.aws_caller_identity.current.account_id] + } + } + + statement { + actions = [ + "kms:*", + ] - resources = [ - "*", - ] + resources = [ + "*", + ] - principals { - type = "AWS" - identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] - } - } + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] } + } +} - resource "aws_kms_key_policy" "test" { - key_id = aws_kms_key.test.id - policy = data.aws_iam_policy_document.key_policy.json - } +resource "aws_kms_key_policy" "test" { + key_id = aws_kms_key.test.id + policy = data.aws_iam_policy_document.key_policy.json +} - resource "aws_vpc" "test" { - cidr_block = "172.31.0.0/16" - enable_dns_hostnames = true - } +resource "aws_vpc" "test" { + cidr_block = "172.31.0.0/16" + enable_dns_hostnames = true +} - resource "aws_subnet" "test" { - vpc_id = aws_vpc.test.id - cidr_block = "172.31.32.0/20" - availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] - } +resource "aws_subnet" "test" { + vpc_id = aws_vpc.test.id + cidr_block = "172.31.32.0/20" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] +} - resource "aws_security_group" "test" { - name = %[1]q - vpc_id = aws_vpc.test.id - - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - } +resource "aws_security_group" "test" { + name = %[1]q + vpc_id = aws_vpc.test.id + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + 
cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} - resource "aws_internet_gateway" "test" { - vpc_id = aws_vpc.test.id - } +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id +} - data "aws_route_tables" "rts" { - vpc_id = aws_vpc.test.id - } +data "aws_route_tables" "rts" { + vpc_id = aws_vpc.test.id +} - resource "aws_route" "r" { - route_table_id = tolist(data.aws_route_tables.rts.ids)[0] - destination_cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.test.id - } - `, rName) +resource "aws_route" "r" { + route_table_id = tolist(data.aws_route_tables.rts.ids)[0] + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id +} +`, rName) } func testAccKxScalingGroupConfig_basic(rName string) string { return acctest.ConfigCompose( testAccKxScalingGroupConfigBase(rName), fmt.Sprintf(` - resource "aws_finspace_kx_scaling_group" "test" { - name = %[1]q - environment_id = aws_finspace_kx_environment.test.id - availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] - host_type = "kx.sg.4xlarge" - } - `, rName)) +resource "aws_finspace_kx_scaling_group" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + host_type = "kx.sg.4xlarge" +} +`, rName)) } diff --git a/website/docs/r/finspace_kx_scaling_group.html.markdown b/website/docs/r/finspace_kx_scaling_group.html.markdown index fa5717c5862..07eb9b2ced0 100644 --- a/website/docs/r/finspace_kx_scaling_group.html.markdown +++ b/website/docs/r/finspace_kx_scaling_group.html.markdown @@ -18,7 +18,8 @@ Terraform resource for managing an AWS FinSpace Kx Scaling Group. 
resource "aws_finspace_kx_scaling_group" "test" { name = "my-tf-kx-scalinggroup" environment_id = aws_finspace_kx_environment.example.id - availability_zone_id = "use1-az2" host_type = "kx.sg.4xlarge" + availability_zone_id = "use1-az2" + host_type = "kx.sg.4xlarge" } ``` @@ -43,7 +44,7 @@ This resource exports the following attributes in addition to the arguments abov * `clusters` - The list of Managed kdb clusters that are currently active in the given scaling group. * `created_timestamp` - The timestamp at which the scaling group was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. * `last_modified_timestamp` - Last timestamp at which the scaling group was updated in FinSpace. Value determined as epoch time in seconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000. -*`status` - The status of scaling group. +* `status` - The status of scaling group. * CREATING – The scaling group creation is in progress. * CREATE_FAILED – The scaling group creation has failed. * ACTIVE – The scaling group is active. From 76afd366616159260a7326f126d95e6b3bc5e7b1 Mon Sep 17 00:00:00 2001 From: Mayank Hirani Date: Wed, 13 Dec 2023 18:37:14 -0500 Subject: [PATCH 160/438] Fix acceptance test linting and doc issue. 
--- internal/service/finspace/kx_volume_test.go | 208 +++++++++--------- .../docs/r/finspace_kx_volume.html.markdown | 1 - 2 files changed, 104 insertions(+), 105 deletions(-) diff --git a/internal/service/finspace/kx_volume_test.go b/internal/service/finspace/kx_volume_test.go index 520c918c1d0..52008f1e25e 100644 --- a/internal/service/finspace/kx_volume_test.go +++ b/internal/service/finspace/kx_volume_test.go @@ -122,132 +122,132 @@ func testAccKxVolumeConfig_basic(rName string) string { return acctest.ConfigCompose( testAccKxVolumeConfigBase(rName), fmt.Sprintf(` - resource "aws_finspace_kx_volume" "test" { - name = %[1]q - environment_id = aws_finspace_kx_environment.test.id - availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] - az_mode = "SINGLE" - type = "NAS_1" - nas1_configuration { - type= "SSD_250" - size= 1200 - } - } - `, rName)) +resource "aws_finspace_kx_volume" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type= "SSD_250" + size= 1200 + } +} +`, rName)) } func testAccKxVolumeConfigBase(rName string) string { return fmt.Sprintf(` - data "aws_caller_identity" "current" {} - data "aws_partition" "current" {} +data "aws_caller_identity" "current" {} +data "aws_partition" "current" {} - output "account_id" { - value = data.aws_caller_identity.current.account_id - } +output "account_id" { + value = data.aws_caller_identity.current.account_id +} - resource "aws_kms_key" "test" { - deletion_window_in_days = 7 - } +resource "aws_kms_key" "test" { + deletion_window_in_days = 7 +} - resource "aws_finspace_kx_environment" "test" { - name = %[1]q - kms_key_id = aws_kms_key.test.arn - } +resource "aws_finspace_kx_environment" "test" { + name = %[1]q + kms_key_id = aws_kms_key.test.arn +} - data "aws_iam_policy_document" "key_policy" { - statement { - actions = [ 
- "kms:Decrypt", - "kms:GenerateDataKey" - ] +data "aws_iam_policy_document" "key_policy" { + statement { + actions = [ + "kms:Decrypt", + "kms:GenerateDataKey" + ] - resources = [ - aws_kms_key.test.arn, - ] + resources = [ + aws_kms_key.test.arn, + ] - principals { - type = "Service" - identifiers = ["finspace.amazonaws.com"] - } - - condition { - test = "ArnLike" - variable = "aws:SourceArn" - values = ["${aws_finspace_kx_environment.test.arn}/*"] - } - - condition { - test = "StringEquals" - variable = "aws:SourceAccount" - values = [data.aws_caller_identity.current.account_id] - } - } - - statement { - actions = [ - "kms:*", - ] - - resources = [ - "*", - ] - - principals { - type = "AWS" - identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] - } - } + principals { + type = "Service" + identifiers = ["finspace.amazonaws.com"] } - resource "aws_kms_key_policy" "test" { - key_id = aws_kms_key.test.id - policy = data.aws_iam_policy_document.key_policy.json + condition { + test = "ArnLike" + variable = "aws:SourceArn" + values = ["${aws_finspace_kx_environment.test.arn}/*"] } - - resource "aws_vpc" "test" { - cidr_block = "172.31.0.0/16" - enable_dns_hostnames = true + + condition { + test = "StringEquals" + variable = "aws:SourceAccount" + values = [data.aws_caller_identity.current.account_id] } + } - resource "aws_subnet" "test" { - vpc_id = aws_vpc.test.id - cidr_block = "172.31.32.0/20" - availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + statement { + actions = [ + "kms:*", + ] + + resources = [ + "*", + ] + + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] } + } +} + +resource "aws_kms_key_policy" "test" { + key_id = aws_kms_key.test.id + policy = data.aws_iam_policy_document.key_policy.json +} - resource "aws_security_group" "test" { - name = %[1]q - vpc_id = 
aws_vpc.test.id +resource "aws_vpc" "test" { + cidr_block = "172.31.0.0/16" + enable_dns_hostnames = true +} - ingress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } +resource "aws_subnet" "test" { + vpc_id = aws_vpc.test.id + cidr_block = "172.31.32.0/20" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] +} - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } - } +resource "aws_security_group" "test" { + name = %[1]q + vpc_id = aws_vpc.test.id + + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } +} - resource "aws_internet_gateway" "test" { - vpc_id = aws_vpc.test.id - } +resource "aws_internet_gateway" "test" { + vpc_id = aws_vpc.test.id +} - data "aws_route_tables" "rts" { - vpc_id = aws_vpc.test.id - } +data "aws_route_tables" "rts" { + vpc_id = aws_vpc.test.id +} - resource "aws_route" "r" { - route_table_id = tolist(data.aws_route_tables.rts.ids)[0] - destination_cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.test.id - } - `, rName) +resource "aws_route" "r" { + route_table_id = tolist(data.aws_route_tables.rts.ids)[0] + destination_cidr_block = "0.0.0.0/0" + gateway_id = aws_internet_gateway.test.id +} +`, rName) } func testAccCheckKxVolumeExists(ctx context.Context, name string, KxVolume *finspace.GetKxVolumeOutput) resource.TestCheckFunc { diff --git a/website/docs/r/finspace_kx_volume.html.markdown b/website/docs/r/finspace_kx_volume.html.markdown index 71e855f1bc4..b573a81efdd 100644 --- a/website/docs/r/finspace_kx_volume.html.markdown +++ b/website/docs/r/finspace_kx_volume.html.markdown @@ -45,7 +45,6 @@ The following arguments are optional: * `description` - (Optional) Description of the volume. * `tags` - (Optional) A list of key-value pairs to label the volume. 
You can add up to 50 tags to a volume - ### nas1_configuration The nas1_configuration block supports the following arguments: From 607dd5a385720a6c7d68c7a872bdfa8d02dd1430 Mon Sep 17 00:00:00 2001 From: alexknez Date: Wed, 13 Dec 2023 14:39:26 +0000 Subject: [PATCH 161/438] Chore: Add timeouts to the aws_networkfirewall_firewall resource --- internal/service/networkfirewall/firewall.go | 30 ++++---- .../service/networkfirewall/firewall_test.go | 76 +++++++++++++++++++ .../r/networkfirewall_firewall.html.markdown | 14 ++++ 3 files changed, 106 insertions(+), 14 deletions(-) diff --git a/internal/service/networkfirewall/firewall.go b/internal/service/networkfirewall/firewall.go index 32b42c6890a..58deb7a06c2 100644 --- a/internal/service/networkfirewall/firewall.go +++ b/internal/service/networkfirewall/firewall.go @@ -40,6 +40,12 @@ func ResourceFirewall() *schema.Resource { StateContext: schema.ImportStatePassthroughContext, }, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Delete: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + }, + CustomizeDiff: customdiff.Sequence( customdiff.ComputedIf("firewall_status", func(ctx context.Context, diff *schema.ResourceDiff, meta interface{}) bool { return diff.HasChange("subnet_mapping") @@ -191,7 +197,7 @@ func resourceFirewallCreate(ctx context.Context, d *schema.ResourceData, meta in d.SetId(aws.StringValue(output.Firewall.FirewallArn)) - if _, err := waitFirewallCreated(ctx, conn, d.Id()); err != nil { + if _, err := waitFirewallCreated(ctx, conn, d.Timeout(schema.TimeoutCreate), d.Id()); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for NetworkFirewall Firewall (%s) create: %s", d.Id(), err) } @@ -362,7 +368,7 @@ func resourceFirewallUpdate(ctx context.Context, d *schema.ResourceData, meta in return sdkdiag.AppendErrorf(diags, "associating NetworkFirewall Firewall (%s) subnets: %s", d.Id(), err) } - updateToken, err = 
waitFirewallUpdated(ctx, conn, d.Id()) + updateToken, err = waitFirewallUpdated(ctx, conn, d.Timeout(schema.TimeoutUpdate), d.Id()) if err != nil { return sdkdiag.AppendErrorf(diags, "waiting for NetworkFirewall Firewall (%s) update: %s", d.Id(), err) @@ -379,7 +385,7 @@ func resourceFirewallUpdate(ctx context.Context, d *schema.ResourceData, meta in _, err := conn.DisassociateSubnetsWithContext(ctx, input) if err == nil { - /*updateToken*/ _, err = waitFirewallUpdated(ctx, conn, d.Id()) + /*updateToken*/ _, err = waitFirewallUpdated(ctx, conn, d.Timeout(schema.TimeoutUpdate), d.Id()) if err != nil { return sdkdiag.AppendErrorf(diags, "waiting for NetworkFirewall Firewall (%s) update: %s", d.Id(), err) @@ -411,7 +417,7 @@ func resourceFirewallDelete(ctx context.Context, d *schema.ResourceData, meta in return sdkdiag.AppendErrorf(diags, "deleting NetworkFirewall Firewall (%s): %s", d.Id(), err) } - if _, err := waitFirewallDeleted(ctx, conn, d.Id()); err != nil { + if _, err := waitFirewallDeleted(ctx, conn, d.Timeout(schema.TimeoutDelete), d.Id()); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for NetworkFirewall Firewall (%s) delete: %s", d.Id(), err) } @@ -459,16 +465,12 @@ func statusFirewall(ctx context.Context, conn *networkfirewall.NetworkFirewall, } } -const ( - firewallTimeout = 20 * time.Minute -) - -func waitFirewallCreated(ctx context.Context, conn *networkfirewall.NetworkFirewall, arn string) (*networkfirewall.Firewall, error) { +func waitFirewallCreated(ctx context.Context, conn *networkfirewall.NetworkFirewall, timeout time.Duration, arn string) (*networkfirewall.Firewall, error) { stateConf := &retry.StateChangeConf{ Pending: []string{networkfirewall.FirewallStatusValueProvisioning}, Target: []string{networkfirewall.FirewallStatusValueReady}, Refresh: statusFirewall(ctx, conn, arn), - Timeout: firewallTimeout, + Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) @@ -480,12 +482,12 @@ func waitFirewallCreated(ctx 
context.Context, conn *networkfirewall.NetworkFirew return nil, err } -func waitFirewallUpdated(ctx context.Context, conn *networkfirewall.NetworkFirewall, arn string) (string, error) { +func waitFirewallUpdated(ctx context.Context, conn *networkfirewall.NetworkFirewall, timeout time.Duration, arn string) (string, error) { stateConf := &retry.StateChangeConf{ Pending: []string{networkfirewall.FirewallStatusValueProvisioning}, Target: []string{networkfirewall.FirewallStatusValueReady}, Refresh: statusFirewall(ctx, conn, arn), - Timeout: firewallTimeout, + Timeout: timeout, // Delay added to account for Associate/DisassociateSubnet calls that return // a READY status immediately after the method is called instead of immediately // returning PROVISIONING @@ -501,12 +503,12 @@ func waitFirewallUpdated(ctx context.Context, conn *networkfirewall.NetworkFirew return "", err } -func waitFirewallDeleted(ctx context.Context, conn *networkfirewall.NetworkFirewall, arn string) (*networkfirewall.Firewall, error) { +func waitFirewallDeleted(ctx context.Context, conn *networkfirewall.NetworkFirewall, timeout time.Duration, arn string) (*networkfirewall.Firewall, error) { stateConf := &retry.StateChangeConf{ Pending: []string{networkfirewall.FirewallStatusValueDeleting}, Target: []string{}, Refresh: statusFirewall(ctx, conn, arn), - Timeout: firewallTimeout, + Timeout: timeout, } outputRaw, err := stateConf.WaitForStateContext(ctx) diff --git a/internal/service/networkfirewall/firewall_test.go b/internal/service/networkfirewall/firewall_test.go index b960eaecf08..b22d42bf8d4 100644 --- a/internal/service/networkfirewall/firewall_test.go +++ b/internal/service/networkfirewall/firewall_test.go @@ -402,6 +402,55 @@ func TestAccNetworkFirewallFirewall_tags(t *testing.T) { }) } +func TestAccNetworkFirewallFirewall_timeout(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_networkfirewall_firewall.test" + 
policyResourceName := "aws_networkfirewall_firewall_policy.test" + subnetResourceName := "aws_subnet.test.0" + vpcResourceName := "aws_vpc.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, networkfirewall.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckFirewallDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccFirewallConfig_timeout(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFirewallExists(ctx, resourceName), + acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "network-firewall", fmt.Sprintf("firewall/%s", rName)), + resource.TestCheckResourceAttr(resourceName, "delete_protection", "false"), + resource.TestCheckResourceAttr(resourceName, "description", ""), + resource.TestCheckResourceAttrPair(resourceName, "firewall_policy_arn", policyResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "firewall_status.#", "1"), + resource.TestCheckResourceAttr(resourceName, "firewall_status.0.sync_states.#", "1"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "firewall_status.0.sync_states.*.availability_zone", subnetResourceName, "availability_zone"), + resource.TestMatchTypeSetElemNestedAttrs(resourceName, "firewall_status.0.sync_states.*", map[string]*regexp.Regexp{ + "attachment.0.endpoint_id": regexache.MustCompile(`vpce-`), + }), + resource.TestCheckTypeSetElemAttrPair(resourceName, "firewall_status.0.sync_states.*.attachment.0.subnet_id", subnetResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttrPair(resourceName, "vpc_id", vpcResourceName, "id"), + resource.TestCheckResourceAttr(resourceName, "subnet_mapping.#", "1"), + resource.TestCheckTypeSetElemAttrPair(resourceName, "subnet_mapping.*.subnet_id", subnetResourceName, "id"), + 
resource.TestCheckTypeSetElemNestedAttrs(resourceName, "subnet_mapping.*", map[string]string{ + "ip_address_type": networkfirewall.IPAddressTypeIpv4, + }), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), + resource.TestCheckResourceAttrSet(resourceName, "update_token"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccNetworkFirewallFirewall_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -600,6 +649,10 @@ resource "aws_networkfirewall_firewall" "test" { subnet_mapping { subnet_id = aws_subnet.example.id } + + timeouts { + update = "1h" + } } `, rName)) } @@ -632,6 +685,10 @@ resource "aws_networkfirewall_firewall" "test" { subnet_mapping { subnet_id = aws_subnet.example.id } + + timeouts { + update = "1h" + } } `, rName)) } @@ -681,3 +738,22 @@ resource "aws_networkfirewall_firewall" "test" { } `, rName)) } + +func testAccFirewallConfig_timeout(rName string) string { + return acctest.ConfigCompose(testAccFirewallConfig_base(rName), fmt.Sprintf(` +resource "aws_networkfirewall_firewall" "test" { + name = %[1]q + firewall_policy_arn = aws_networkfirewall_firewall_policy.test.arn + vpc_id = aws_vpc.test.id + + subnet_mapping { + subnet_id = aws_subnet.test[0].id + } + + timeouts { + create = "50m" + delete = "50m" + } +} +`, rName)) +} diff --git a/website/docs/r/networkfirewall_firewall.html.markdown b/website/docs/r/networkfirewall_firewall.html.markdown index e27a757a012..dcfffede682 100644 --- a/website/docs/r/networkfirewall_firewall.html.markdown +++ b/website/docs/r/networkfirewall_firewall.html.markdown @@ -25,6 +25,12 @@ resource "aws_networkfirewall_firewall" "example" { Tag1 = "Value1" Tag2 = "Value2" } + + timeouts { + create = "40m" + update = "50m" + delete = "1h" + } } ``` @@ -85,6 +91,14 @@ This resource exports the following attributes in addition to the arguments abov * `update_token` - A 
string token used when updating a firewall. +## Timeouts + +[Configuration options](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts): + +- `create` - (Default `30m`) +- `update` - (Default `30m`) +- `delete` - (Default `30m`) + ## Import In Terraform v1.5.0 and later, use an [`import` block](https://developer.hashicorp.com/terraform/language/import) to import Network Firewall Firewalls using their `arn`. For example: From c6f1d524cc36f1c371fe86f0fdde6c0448a7f1c9 Mon Sep 17 00:00:00 2001 From: alexknez Date: Thu, 14 Dec 2023 00:24:27 +0000 Subject: [PATCH 162/438] Add CHANGELOG --- .changelog/34918.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/34918.txt diff --git a/.changelog/34918.txt b/.changelog/34918.txt new file mode 100644 index 00000000000..db6d0a0f528 --- /dev/null +++ b/.changelog/34918.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_networkfirewall_firewall: Add configurable timeouts +``` \ No newline at end of file From c12d9a5df40dc662f887fd3e6277d35206d7a026 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Wed, 13 Dec 2023 20:08:24 -0500 Subject: [PATCH 163/438] r/aws_finspace_kx_scaling_group(test): fix test name typo --- internal/service/finspace/kx_scaling_group_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/finspace/kx_scaling_group_test.go b/internal/service/finspace/kx_scaling_group_test.go index 28c988f4e2a..9730ce94184 100644 --- a/internal/service/finspace/kx_scaling_group_test.go +++ b/internal/service/finspace/kx_scaling_group_test.go @@ -58,7 +58,7 @@ func TestAccFinSpaceKxScalingGroup_basic(t *testing.T) { }) } -func TestAccFinSpaceKxScalingGroup_dissappears(t *testing.T) { +func TestAccFinSpaceKxScalingGroup_disappears(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } From 4fbaa349226a2b1d029d3c6d680f93ca4ff27558 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Wed, 13 Dec 
2023 20:09:18 -0500 Subject: [PATCH 164/438] r/aws_finspace_kx_scaling_group: reorg waiters, export finder --- internal/service/finspace/kx_scaling_group.go | 70 +++++++++---------- 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/internal/service/finspace/kx_scaling_group.go b/internal/service/finspace/kx_scaling_group.go index db5b8e9713c..c6248b4409e 100644 --- a/internal/service/finspace/kx_scaling_group.go +++ b/internal/service/finspace/kx_scaling_group.go @@ -148,44 +148,11 @@ func resourceKxScalingGroupCreate(ctx context.Context, d *schema.ResourceData, m return append(diags, resourceKxScalingGroupRead(ctx, d, meta)...) } -func waitKxScalingGroupCreated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxScalingGroupOutput, error) { - stateConf := &retry.StateChangeConf{ - Pending: enum.Slice(types.KxScalingGroupStatusCreating), - Target: enum.Slice(types.KxScalingGroupStatusActive), - Refresh: statusKxScalingGroup(ctx, conn, id), - Timeout: timeout, - NotFoundChecks: 20, - ContinuousTargetOccurence: 2, - } - - outputRaw, err := stateConf.WaitForStateContext(ctx) - if out, ok := outputRaw.(*finspace.GetKxScalingGroupOutput); ok { - return out, err - } - - return nil, err -} - -func statusKxScalingGroup(ctx context.Context, conn *finspace.Client, id string) retry.StateRefreshFunc { - return func() (interface{}, string, error) { - out, err := findKxScalingGroupById(ctx, conn, id) - if tfresource.NotFound(err) { - return nil, "", nil - } - - if err != nil { - return nil, "", err - } - - return out, string(out.Status), nil - } -} - func resourceKxScalingGroupRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) - out, err := findKxScalingGroupById(ctx, conn, d.Id()) + out, err := FindKxScalingGroupById(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] 
FinSpace KxScalingGroup (%s) not found, removing from state", d.Id()) d.SetId("") @@ -246,7 +213,7 @@ func resourceKxScalingGroupDelete(ctx context.Context, d *schema.ResourceData, m return diags } -func findKxScalingGroupById(ctx context.Context, conn *finspace.Client, id string) (*finspace.GetKxScalingGroupOutput, error) { +func FindKxScalingGroupById(ctx context.Context, conn *finspace.Client, id string) (*finspace.GetKxScalingGroupOutput, error) { parts, err := flex.ExpandResourceId(id, kxScalingGroupIDPartCount, false) if err != nil { return nil, err @@ -275,6 +242,24 @@ func findKxScalingGroupById(ctx context.Context, conn *finspace.Client, id strin return out, nil } +func waitKxScalingGroupCreated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxScalingGroupOutput, error) { + stateConf := &retry.StateChangeConf{ + Pending: enum.Slice(types.KxScalingGroupStatusCreating), + Target: enum.Slice(types.KxScalingGroupStatusActive), + Refresh: statusKxScalingGroup(ctx, conn, id), + Timeout: timeout, + NotFoundChecks: 20, + ContinuousTargetOccurence: 2, + } + + outputRaw, err := stateConf.WaitForStateContext(ctx) + if out, ok := outputRaw.(*finspace.GetKxScalingGroupOutput); ok { + return out, err + } + + return nil, err +} + func waitKxScalingGroupDeleted(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxScalingGroupOutput, error) { stateConf := &retry.StateChangeConf{ Pending: enum.Slice(types.KxScalingGroupStatusDeleting), @@ -290,3 +275,18 @@ func waitKxScalingGroupDeleted(ctx context.Context, conn *finspace.Client, id st return nil, err } + +func statusKxScalingGroup(ctx context.Context, conn *finspace.Client, id string) retry.StateRefreshFunc { + return func() (interface{}, string, error) { + out, err := FindKxScalingGroupById(ctx, conn, id) + if tfresource.NotFound(err) { + return nil, "", nil + } + + if err != nil { + return nil, "", err + } + + return out, 
string(out.Status), nil + } +} From 2b70bb5274915e6bf3fab984054e7544663e742e Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Wed, 13 Dec 2023 20:24:29 -0500 Subject: [PATCH 165/438] r/aws_finspace_kx_scaling_group(test): prefer finder in test check func, fmt config --- .../service/finspace/kx_scaling_group_test.go | 94 +++++++++---------- 1 file changed, 43 insertions(+), 51 deletions(-) diff --git a/internal/service/finspace/kx_scaling_group_test.go b/internal/service/finspace/kx_scaling_group_test.go index 9730ce94184..24a8a16ed35 100644 --- a/internal/service/finspace/kx_scaling_group_test.go +++ b/internal/service/finspace/kx_scaling_group_test.go @@ -9,7 +9,6 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/finspace" "github.com/aws/aws-sdk-go-v2/service/finspace/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" @@ -28,7 +27,7 @@ func TestAccFinSpaceKxScalingGroup_basic(t *testing.T) { } ctx := acctest.Context(t) - var KxScalingGroup finspace.GetKxScalingGroupOutput + var scalingGroup finspace.GetKxScalingGroupOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_finspace_kx_scaling_group.test" @@ -44,7 +43,7 @@ func TestAccFinSpaceKxScalingGroup_basic(t *testing.T) { { Config: testAccKxScalingGroupConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckKxScalingGroupExists(ctx, resourceName, &KxScalingGroup), + testAccCheckKxScalingGroupExists(ctx, resourceName, &scalingGroup), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "status", string(types.KxScalingGroupStatusActive)), ), @@ -64,7 +63,7 @@ func TestAccFinSpaceKxScalingGroup_disappears(t *testing.T) { } ctx := acctest.Context(t) - var KxScalingGroup finspace.GetKxScalingGroupOutput + var scalingGroup finspace.GetKxScalingGroupOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := 
"aws_finspace_kx_scaling_group.test" @@ -80,7 +79,7 @@ func TestAccFinSpaceKxScalingGroup_disappears(t *testing.T) { { Config: testAccKxScalingGroupConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckKxScalingGroupExists(ctx, resourceName, &KxScalingGroup), + testAccCheckKxScalingGroupExists(ctx, resourceName, &scalingGroup), acctest.CheckResourceDisappears(ctx, acctest.Provider, tffinspace.ResourceKxScalingGroup(), resourceName), ), ExpectNonEmptyPlan: true, @@ -98,11 +97,7 @@ func testAccCheckKxScalingGroupDestroy(ctx context.Context) resource.TestCheckFu continue } - input := &finspace.GetKxScalingGroupInput{ - ScalingGroupName: aws.String(rs.Primary.Attributes["name"]), - EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), - } - _, err := conn.GetKxScalingGroup(ctx, input) + _, err := tffinspace.FindKxScalingGroupById(ctx, conn, rs.Primary.ID) if err != nil { var nfe *types.ResourceNotFoundException if errors.As(err, &nfe) { @@ -118,7 +113,7 @@ func testAccCheckKxScalingGroupDestroy(ctx context.Context) resource.TestCheckFu } } -func testAccCheckKxScalingGroupExists(ctx context.Context, name string, KxScalingGroup *finspace.GetKxScalingGroupOutput) resource.TestCheckFunc { +func testAccCheckKxScalingGroupExists(ctx context.Context, name string, scalingGroup *finspace.GetKxScalingGroupOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[name] if !ok { @@ -130,16 +125,13 @@ func testAccCheckKxScalingGroupExists(ctx context.Context, name string, KxScalin } conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) - resp, err := conn.GetKxScalingGroup(ctx, &finspace.GetKxScalingGroupInput{ - ScalingGroupName: aws.String(rs.Primary.Attributes["name"]), - EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), - }) + resp, err := tffinspace.FindKxScalingGroupById(ctx, conn, rs.Primary.ID) if err != nil { return create.Error(names.FinSpace, 
create.ErrActionCheckingExistence, tffinspace.ResNameKxScalingGroup, rs.Primary.ID, err) } - *KxScalingGroup = *resp + *scalingGroup = *resp return nil } @@ -166,45 +158,45 @@ resource "aws_finspace_kx_environment" "test" { data "aws_iam_policy_document" "key_policy" { statement { actions = [ - "kms:Decrypt", - "kms:GenerateDataKey" - ] - + "kms:Decrypt", + "kms:GenerateDataKey" + ] + resources = [ - aws_kms_key.test.arn, + aws_kms_key.test.arn, ] - - principals { - type = "Service" - identifiers = ["finspace.amazonaws.com"] - } - - condition { - test = "ArnLike" - variable = "aws:SourceArn" - values = ["${aws_finspace_kx_environment.test.arn}/*"] - } - - condition { - test = "StringEquals" - variable = "aws:SourceAccount" - values = [data.aws_caller_identity.current.account_id] - } + + principals { + type = "Service" + identifiers = ["finspace.amazonaws.com"] + } + + condition { + test = "ArnLike" + variable = "aws:SourceArn" + values = ["${aws_finspace_kx_environment.test.arn}/*"] + } + + condition { + test = "StringEquals" + variable = "aws:SourceAccount" + values = [data.aws_caller_identity.current.account_id] + } } - + statement { - actions = [ - "kms:*", - ] - + actions = [ + "kms:*", + ] + resources = [ - "*", + "*", ] - + principals { - type = "AWS" - identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] - } + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] + } } } @@ -229,15 +221,15 @@ resource "aws_security_group" "test" { vpc_id = aws_vpc.test.id ingress { - from_port = 0 - to_port = 0 + from_port = 0 + to_port = 0 protocol = "-1" cidr_blocks = ["0.0.0.0/0"] } egress { from_port = 0 - to_port = 0 + to_port = 0 protocol = "-1" cidr_blocks = ["0.0.0.0/0"] } From 20725a168a75604c1e3e6bbd7d7aa66ba8698f0a Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Wed, 13 Dec 2023 20:27:43 -0500 Subject: [PATCH 166/438] 
r/aws_finspace_kx_scaling_group(doc): fmt config --- .../r/finspace_kx_scaling_group.html.markdown | 24 +++++++++---------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/website/docs/r/finspace_kx_scaling_group.html.markdown b/website/docs/r/finspace_kx_scaling_group.html.markdown index 07eb9b2ced0..c4f34b5b600 100644 --- a/website/docs/r/finspace_kx_scaling_group.html.markdown +++ b/website/docs/r/finspace_kx_scaling_group.html.markdown @@ -15,10 +15,10 @@ Terraform resource for managing an AWS FinSpace Kx Scaling Group. ### Basic Usage ```terraform -resource "aws_finspace_kx_scaling_group" "test" { - name = "my-tf-kx-scalinggroup" - environment_id = aws_finspace_kx_environment.example.id - availability_zone_id = "use1-az2" +resource "aws_finspace_kx_scaling_group" "example" { + name = "my-tf-kx-scalinggroup" + environment_id = aws_finspace_kx_environment.example.id + availability_zone_id = "use1-az2" host_type = "kx.sg.4xlarge" } ``` @@ -45,14 +45,14 @@ This resource exports the following attributes in addition to the arguments abov * `created_timestamp` - The timestamp at which the scaling group was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. * `last_modified_timestamp` - Last timestamp at which the scaling group was updated in FinSpace. Value determined as epoch time in seconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000. * `status` - The status of scaling group. - * CREATING – The scaling group creation is in progress. - * CREATE_FAILED – The scaling group creation has failed. - * ACTIVE – The scaling group is active. - * UPDATING – The scaling group is in the process of being updated. - * UPDATE_FAILED – The update action failed. - * DELETING – The scaling group is in the process of being deleted. - * DELETE_FAILED – The system failed to delete the scaling group. 
- * DELETED – The scaling group is successfully deleted. + * `CREATING` – The scaling group creation is in progress. + * `CREATE_FAILED` – The scaling group creation has failed. + * `ACTIVE` – The scaling group is active. + * `UPDATING` – The scaling group is in the process of being updated. + * `UPDATE_FAILED` – The update action failed. + * `DELETING` – The scaling group is in the process of being deleted. + * `DELETE_FAILED` – The system failed to delete the scaling group. + * `DELETED` – The scaling group is successfully deleted. * `status_reason` - The error message when a failed state occurs. * `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). From 6eb984ca12c687cf78e455f179755ffca5c61333 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Wed, 13 Dec 2023 20:29:44 -0500 Subject: [PATCH 167/438] r/aws_finspace_kx_scaling_group(test): terrafmt take 2 --- internal/service/finspace/kx_scaling_group_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/service/finspace/kx_scaling_group_test.go b/internal/service/finspace/kx_scaling_group_test.go index 24a8a16ed35..6ef696fda1e 100644 --- a/internal/service/finspace/kx_scaling_group_test.go +++ b/internal/service/finspace/kx_scaling_group_test.go @@ -162,9 +162,9 @@ data "aws_iam_policy_document" "key_policy" { "kms:GenerateDataKey" ] - resources = [ + resources = [ aws_kms_key.test.arn, - ] + ] principals { type = "Service" @@ -199,7 +199,7 @@ data "aws_iam_policy_document" "key_policy" { } } } - + resource "aws_kms_key_policy" "test" { key_id = aws_kms_key.test.id policy = data.aws_iam_policy_document.key_policy.json From 900a3fc56612d0137f2071cfd8d2db58963f8479 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Wed, 13 Dec 2023 20:38:27 -0500 Subject: [PATCH 168/438] r/aws_finspace_kx_scaling_group: prefer create.AppendDiagError --- 
internal/service/finspace/kx_scaling_group.go | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/internal/service/finspace/kx_scaling_group.go b/internal/service/finspace/kx_scaling_group.go index c6248b4409e..852fed8de75 100644 --- a/internal/service/finspace/kx_scaling_group.go +++ b/internal/service/finspace/kx_scaling_group.go @@ -120,7 +120,7 @@ func resourceKxScalingGroupCreate(ctx context.Context, d *schema.ResourceData, m } rID, err := flex.FlattenResourceId(idParts, kxScalingGroupIDPartCount, false) if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionFlatteningResourceId, ResNameKxScalingGroup, d.Get("name").(string), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionFlatteningResourceId, ResNameKxScalingGroup, d.Get("name").(string), err) } d.SetId(rID) @@ -134,15 +134,15 @@ func resourceKxScalingGroupCreate(ctx context.Context, d *schema.ResourceData, m out, err := conn.CreateKxScalingGroup(ctx, in) if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxScalingGroup, d.Get("name").(string), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxScalingGroup, d.Get("name").(string), err) } if out == nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxScalingGroup, d.Get("name").(string), errors.New("empty output"))...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxScalingGroup, d.Get("name").(string), errors.New("empty output")) } if _, err := waitKxScalingGroupCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxScalingGroup, d.Id(), err)...) 
+ return create.AppendDiagError(diags, names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxScalingGroup, d.Id(), err) } return append(diags, resourceKxScalingGroupRead(ctx, d, meta)...) @@ -160,7 +160,7 @@ func resourceKxScalingGroupRead(ctx context.Context, d *schema.ResourceData, met } if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionReading, ResNameKxScalingGroup, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionReading, ResNameKxScalingGroup, d.Id(), err) } d.Set("arn", out.ScalingGroupArn) d.Set("status", out.Status) @@ -174,7 +174,7 @@ func resourceKxScalingGroupRead(ctx context.Context, d *schema.ResourceData, met parts, err := flex.ExpandResourceId(d.Id(), kxUserIDPartCount, false) if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxScalingGroup, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxScalingGroup, d.Id(), err) } d.Set("environment_id", parts[0]) @@ -202,12 +202,12 @@ func resourceKxScalingGroupDelete(ctx context.Context, d *schema.ResourceData, m return diags } - return append(diags, create.DiagError(names.FinSpace, create.ErrActionDeleting, ResNameKxScalingGroup, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionDeleting, ResNameKxScalingGroup, d.Id(), err) } _, err = waitKxScalingGroupDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)) if err != nil && !tfresource.NotFound(err) { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForDeletion, ResNameKxScalingGroup, d.Id(), err)...) 
+ return create.AppendDiagError(diags, names.FinSpace, create.ErrActionWaitingForDeletion, ResNameKxScalingGroup, d.Id(), err) } return diags From 4a6992ca6189a78efc98abe8b69fb98509855542 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 14 Dec 2023 06:20:09 +0000 Subject: [PATCH 169/438] build(deps): bump the aws-sdk-go group with 3 updates Bumps the aws-sdk-go group with 3 updates: [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go), [github.com/aws/aws-sdk-go-v2/service/emrserverless](https://github.com/aws/aws-sdk-go-v2) and [github.com/aws/aws-sdk-go-v2/service/pipes](https://github.com/aws/aws-sdk-go-v2). Updates `github.com/aws/aws-sdk-go` from 1.49.1 to 1.49.2 - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.49.1...v1.49.2) Updates `github.com/aws/aws-sdk-go-v2/service/emrserverless` from 1.14.5 to 1.14.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/service/mq/v1.14.6/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/mq/v1.14.5...service/mq/v1.14.6) Updates `github.com/aws/aws-sdk-go-v2/service/pipes` from 1.9.5 to 1.9.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/m2/v1.9.5...service/pipes/v1.9.6) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/emrserverless dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/pipes dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go ... 
Signed-off-by: dependabot[bot] --- go.mod | 6 +++--- go.sum | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index 3821b9ff612..553689a5b38 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.20 require ( github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c github.com/YakDriver/regexache v0.23.0 - github.com/aws/aws-sdk-go v1.49.1 + github.com/aws/aws-sdk-go v1.49.2 github.com/aws/aws-sdk-go-v2 v1.24.0 github.com/aws/aws-sdk-go-v2/config v1.26.1 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 @@ -41,7 +41,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/ecr v1.24.5 github.com/aws/aws-sdk-go-v2/service/eks v1.35.5 github.com/aws/aws-sdk-go-v2/service/emr v1.35.5 - github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.5 + github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.6 github.com/aws/aws-sdk-go-v2/service/evidently v1.16.5 github.com/aws/aws-sdk-go-v2/service/finspace v1.20.0 github.com/aws/aws-sdk-go-v2/service/fis v1.21.5 @@ -65,7 +65,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/oam v1.7.5 github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.9.5 github.com/aws/aws-sdk-go-v2/service/osis v1.6.5 - github.com/aws/aws-sdk-go-v2/service/pipes v1.9.5 + github.com/aws/aws-sdk-go-v2/service/pipes v1.9.6 github.com/aws/aws-sdk-go-v2/service/polly v1.36.5 github.com/aws/aws-sdk-go-v2/service/pricing v1.24.5 github.com/aws/aws-sdk-go-v2/service/qldb v1.19.5 diff --git a/go.sum b/go.sum index 002e8c2e279..1e073c94d70 100644 --- a/go.sum +++ b/go.sum @@ -22,8 +22,8 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmms github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go v1.49.1 
h1:Dsamcd8d/nNb3A+bZ0ucfGl0vGZsW5wlRW0vhoYGoeQ= -github.com/aws/aws-sdk-go v1.49.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.49.2 h1:+4BEcm1nPCoDbVd+gg8cdxpa1qJfrvnddy12vpEVWjw= +github.com/aws/aws-sdk-go v1.49.2/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v1.24.0 h1:890+mqQ+hTpNuw0gGP6/4akolQkSToDJgHfQE7AwGuk= github.com/aws/aws-sdk-go-v2 v1.24.0/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs= @@ -108,8 +108,8 @@ github.com/aws/aws-sdk-go-v2/service/eks v1.35.5 h1:LEYyWSnfdSSysPr5JWUkNwOD0MvX github.com/aws/aws-sdk-go-v2/service/eks v1.35.5/go.mod h1:L1uv3UgQlAkdM9v0gpec7nnfUiQkCnGMjBE7MJArfWQ= github.com/aws/aws-sdk-go-v2/service/emr v1.35.5 h1:dZtEDpqYVg3i5oT8lSXxEsg6dInewHA3qNuyzHTvWck= github.com/aws/aws-sdk-go-v2/service/emr v1.35.5/go.mod h1:Drh6y2qLaw/wnDKTIcdqM2m358MIRXsZ2Bj2tjhVLq0= -github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.5 h1:IsLomor7ErBzqMCtI71gqTw0ENKbZxVhHMwSnDImbTw= -github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.5/go.mod h1:G2r5cqojvwkdJJx6NDxszEfHC8f02TF15dE/3bg8P9A= +github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.6 h1:O2ppygCppB40GS7lDJUX4dGEgEdsKkX62oIAGgre/rY= +github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.6/go.mod h1:G2r5cqojvwkdJJx6NDxszEfHC8f02TF15dE/3bg8P9A= github.com/aws/aws-sdk-go-v2/service/evidently v1.16.5 h1:qMMMld3RbqxSZ5KEokAu+w4MGV9YlSvisJbk4iMO4m0= github.com/aws/aws-sdk-go-v2/service/evidently v1.16.5/go.mod h1:ydI4dfZIWil2hOsneE1QWDOxY/CdC37oT96S4JOrD24= github.com/aws/aws-sdk-go-v2/service/finspace v1.20.0 h1:n3TWZAn4gV2/GiJMnuNuSEkgyXHkKPEkenU5ZmmFS1o= @@ -168,8 +168,8 @@ github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.9.5 h1:V+zBQiUAATdw github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.9.5/go.mod h1:Hky91JAG7y6hJrIoZ6IyJlB99+AFOPUIfqVQcZ+fbhY= 
github.com/aws/aws-sdk-go-v2/service/osis v1.6.5 h1:u0FL7wY1ni4WQkpfUiBslPmwKOltziQkGg5njTpPH6M= github.com/aws/aws-sdk-go-v2/service/osis v1.6.5/go.mod h1:wRTpbH8h5d4SJmdsy9LNEuZNHrNtUCZMl+U1slAW4Ng= -github.com/aws/aws-sdk-go-v2/service/pipes v1.9.5 h1:BKJlKvRxWQCjd7UyZPLlvkvBDOf7UziF5spBSkMq3J4= -github.com/aws/aws-sdk-go-v2/service/pipes v1.9.5/go.mod h1:N3pAD/7GiKZAOBFFsF9BqWdSg33HM8ibXoAyPQXgcNI= +github.com/aws/aws-sdk-go-v2/service/pipes v1.9.6 h1:cDjJ1OsUDDHP0DERFe+kon0awE0vMt+6xjd9zuOaOv8= +github.com/aws/aws-sdk-go-v2/service/pipes v1.9.6/go.mod h1:N3pAD/7GiKZAOBFFsF9BqWdSg33HM8ibXoAyPQXgcNI= github.com/aws/aws-sdk-go-v2/service/polly v1.36.5 h1:/BHypWAWPEuwfnlb4hJz5R1uedDGNtorZgEHYtW/wI4= github.com/aws/aws-sdk-go-v2/service/polly v1.36.5/go.mod h1:mmQzyk89+rKEfieMV8gHoFoVmrPiyKjqORj2Uk5+O04= github.com/aws/aws-sdk-go-v2/service/pricing v1.24.5 h1:yJniPHxzGy0jtJNkXYTqI8ps587kl1Jf8Luz5K8Jxjs= From 1a6b15833e9a3b627dbf15f9681b186a22b95670 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 14 Dec 2023 06:38:03 +0000 Subject: [PATCH 170/438] build(deps): bump github.com/aws/aws-sdk-go in /.ci/providerlint Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.49.1 to 1.49.2. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.49.1...v1.49.2) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .ci/providerlint/go.mod | 2 +- .ci/providerlint/go.sum | 4 ++-- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 13 +++++++++++++ .ci/providerlint/vendor/modules.txt | 2 +- 4 files changed, 17 insertions(+), 4 deletions(-) diff --git a/.ci/providerlint/go.mod b/.ci/providerlint/go.mod index a4d456a3715..8d6db90f68b 100644 --- a/.ci/providerlint/go.mod +++ b/.ci/providerlint/go.mod @@ -3,7 +3,7 @@ module github.com/hashicorp/terraform-provider-aws/ci/providerlint go 1.20 require ( - github.com/aws/aws-sdk-go v1.49.1 + github.com/aws/aws-sdk-go v1.49.2 github.com/bflad/tfproviderlint v0.29.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0 golang.org/x/tools v0.13.0 diff --git a/.ci/providerlint/go.sum b/.ci/providerlint/go.sum index 7375ababe78..8755e6de2fb 100644 --- a/.ci/providerlint/go.sum +++ b/.ci/providerlint/go.sum @@ -8,8 +8,8 @@ github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= -github.com/aws/aws-sdk-go v1.49.1 h1:Dsamcd8d/nNb3A+bZ0ucfGl0vGZsW5wlRW0vhoYGoeQ= -github.com/aws/aws-sdk-go v1.49.1/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.49.2 h1:+4BEcm1nPCoDbVd+gg8cdxpa1qJfrvnddy12vpEVWjw= +github.com/aws/aws-sdk-go v1.49.2/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/bflad/gopaniccheck v0.1.0 h1:tJftp+bv42ouERmUMWLoUn/5bi/iQZjHPznM00cP/bU= github.com/bflad/gopaniccheck v0.1.0/go.mod h1:ZCj2vSr7EqVeDaqVsWN4n2MwdROx1YL+LFo47TSWtsA= github.com/bflad/tfproviderlint v0.29.0 h1:zxKYAAM6IZ4ace1a3LX+uzMRIMP8L+iOtEc+FP2Yoow= diff --git a/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go 
b/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index b3d8f8c2c94..41a2711656a 100644 --- a/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -12007,6 +12007,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -34775,6 +34778,16 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "pipes": service{ + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{}, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{}, + }, + }, "polly": service{ Endpoints: serviceEndpoints{ endpointKey{ diff --git a/.ci/providerlint/vendor/modules.txt b/.ci/providerlint/vendor/modules.txt index d681c129326..3b328cf28be 100644 --- a/.ci/providerlint/vendor/modules.txt +++ b/.ci/providerlint/vendor/modules.txt @@ -24,7 +24,7 @@ github.com/agext/levenshtein # github.com/apparentlymart/go-textseg/v15 v15.0.0 ## explicit; go 1.16 github.com/apparentlymart/go-textseg/v15/textseg -# github.com/aws/aws-sdk-go v1.49.1 +# github.com/aws/aws-sdk-go v1.49.2 ## explicit; go 1.19 github.com/aws/aws-sdk-go/aws/awserr github.com/aws/aws-sdk-go/aws/endpoints From 1e55ca64631adacc50e347892f3ee6ee2b247cea Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 14 Dec 2023 12:54:15 +0000 Subject: [PATCH 171/438] Update CHANGELOG.md for #34922 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 52e14127080..65f9cccaf94 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,7 @@ FEATURES: * **New Data Source:** `aws_ssoadmin_application_assignments` ([#34796](https://github.com/hashicorp/terraform-provider-aws/issues/34796)) * **New Data Source:** `aws_ssoadmin_principal_application_assignments` 
([#34815](https://github.com/hashicorp/terraform-provider-aws/issues/34815)) +* **New Resource:** `aws_ssoadmin_trusted_token_issuer` ([#34839](https://github.com/hashicorp/terraform-provider-aws/issues/34839)) ENHANCEMENTS: @@ -30,6 +31,7 @@ BUG FIXES: * resource/aws_cloudwatch_log_group: Fix `invalid new value for .skip_destroy: was cty.False, but now null` errors ([#30354](https://github.com/hashicorp/terraform-provider-aws/issues/30354)) * resource/aws_cloudwatch_log_group: Remove default value (`STANDARD`) for `log_group_class` argument and mark as Computed. This fixes `InvalidParameterException: Only Standard log class is supported` errors in AWS Regions other than AWS Commercial ([#34812](https://github.com/hashicorp/terraform-provider-aws/issues/34812)) * resource/aws_db_instance: Fix error where Terraform loses track of resource if Blue/Green Deployment is applied outside of Terraform ([#34728](https://github.com/hashicorp/terraform-provider-aws/issues/34728)) +* resource/aws_dms_event_subscription: `source_ids` and `source_type` are Required ([#33731](https://github.com/hashicorp/terraform-provider-aws/issues/33731)) * resource/aws_ecr_pull_through_cache_rule: Fix plan time validation for `ecr_repository_prefix` ([#34716](https://github.com/hashicorp/terraform-provider-aws/issues/34716)) * resource/aws_lb: Correct in-place update of `security_groups` for Network Load Balancers when the new value is Computed ([#33205](https://github.com/hashicorp/terraform-provider-aws/issues/33205)) * resource/aws_lb: Fix `InvalidConfigurationRequest: Load balancer attribute key 'dns_record.client_routing_policy' is not supported on load balancers with type 'network'` errors on resource Create in AWS GovCloud (US) ([#34135](https://github.com/hashicorp/terraform-provider-aws/issues/34135)) From 87ba32eac9f05c1f791ef68e840ad6c442f88291 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 14 Dec 2023 08:02:39 -0500 Subject: [PATCH 172/438] Add 'internal/json' package. 
--- .teamcity/scripts/provider_tests/acceptance_tests.sh | 1 + .teamcity/scripts/provider_tests/unit_tests.sh | 1 + internal/json/remove.go | 4 ++++ 3 files changed, 6 insertions(+) create mode 100644 internal/json/remove.go diff --git a/.teamcity/scripts/provider_tests/acceptance_tests.sh b/.teamcity/scripts/provider_tests/acceptance_tests.sh index 40e32576ad8..3a8a7fdaf7c 100644 --- a/.teamcity/scripts/provider_tests/acceptance_tests.sh +++ b/.teamcity/scripts/provider_tests/acceptance_tests.sh @@ -44,6 +44,7 @@ TF_ACC=1 go test \ ./internal/flex/... \ ./internal/framework/... \ ./internal/generate/... \ + ./internal/json/... \ ./internal/logging/... \ ./internal/maps/... \ ./internal/provider/... \ diff --git a/.teamcity/scripts/provider_tests/unit_tests.sh b/.teamcity/scripts/provider_tests/unit_tests.sh index 02fe6c3c2c4..582a9af99c2 100644 --- a/.teamcity/scripts/provider_tests/unit_tests.sh +++ b/.teamcity/scripts/provider_tests/unit_tests.sh @@ -16,6 +16,7 @@ go test \ ./internal/flex/... \ ./internal/framework/... \ ./internal/generate/... \ + ./internal/json/... \ ./internal/logging/... \ ./internal/maps/... \ ./internal/provider/... \ diff --git a/internal/json/remove.go b/internal/json/remove.go new file mode 100644 index 00000000000..6d271c22339 --- /dev/null +++ b/internal/json/remove.go @@ -0,0 +1,4 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package json From 9613585b6d52044c2285ab32196b1a06b2910000 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 14 Dec 2023 08:10:50 -0500 Subject: [PATCH 173/438] 'internal/ujson' -> 'internal/json/ujson'. 
--- .teamcity/scripts/provider_tests/acceptance_tests.sh | 1 - .teamcity/scripts/provider_tests/unit_tests.sh | 1 - internal/{ => json}/ujson/LICENSE | 0 internal/{ => json}/ujson/quote.go | 0 internal/{ => json}/ujson/quote_test.go | 2 +- .../\302\265json.go" => "internal/json/ujson/\302\265json.go" | 0 .../json/ujson/\302\265json_test.go" | 0 internal/service/fms/managed_service_data.go | 2 +- 8 files changed, 2 insertions(+), 4 deletions(-) rename internal/{ => json}/ujson/LICENSE (100%) rename internal/{ => json}/ujson/quote.go (100%) rename internal/{ => json}/ujson/quote_test.go (98%) rename "internal/ujson/\302\265json.go" => "internal/json/ujson/\302\265json.go" (100%) rename "internal/ujson/\302\265json_test.go" => "internal/json/ujson/\302\265json_test.go" (100%) diff --git a/.teamcity/scripts/provider_tests/acceptance_tests.sh b/.teamcity/scripts/provider_tests/acceptance_tests.sh index 3a8a7fdaf7c..4b2642d92d5 100644 --- a/.teamcity/scripts/provider_tests/acceptance_tests.sh +++ b/.teamcity/scripts/provider_tests/acceptance_tests.sh @@ -55,7 +55,6 @@ TF_ACC=1 go test \ ./internal/tags/... \ ./internal/tfresource/... \ ./internal/types/... \ - ./internal/ujson/... \ ./internal/vault/... \ ./internal/verify/... \ -json -v -count=1 -parallel "%ACCTEST_PARALLELISM%" -timeout=0 -run=TestAcc diff --git a/.teamcity/scripts/provider_tests/unit_tests.sh b/.teamcity/scripts/provider_tests/unit_tests.sh index 582a9af99c2..91580f3e590 100644 --- a/.teamcity/scripts/provider_tests/unit_tests.sh +++ b/.teamcity/scripts/provider_tests/unit_tests.sh @@ -27,7 +27,6 @@ go test \ ./internal/tags/... \ ./internal/tfresource/... \ ./internal/types/... \ - ./internal/ujson/... \ ./internal/vault/... \ ./internal/verify/... 
\ -json diff --git a/internal/ujson/LICENSE b/internal/json/ujson/LICENSE similarity index 100% rename from internal/ujson/LICENSE rename to internal/json/ujson/LICENSE diff --git a/internal/ujson/quote.go b/internal/json/ujson/quote.go similarity index 100% rename from internal/ujson/quote.go rename to internal/json/ujson/quote.go diff --git a/internal/ujson/quote_test.go b/internal/json/ujson/quote_test.go similarity index 98% rename from internal/ujson/quote_test.go rename to internal/json/ujson/quote_test.go index 9c7c47748e5..21b19dc3f28 100644 --- a/internal/ujson/quote_test.go +++ b/internal/json/ujson/quote_test.go @@ -7,7 +7,7 @@ import ( "errors" "testing" - "github.com/hashicorp/terraform-provider-aws/internal/ujson" + "github.com/hashicorp/terraform-provider-aws/internal/json/ujson" ) type quoteTest struct { diff --git "a/internal/ujson/\302\265json.go" "b/internal/json/ujson/\302\265json.go" similarity index 100% rename from "internal/ujson/\302\265json.go" rename to "internal/json/ujson/\302\265json.go" diff --git "a/internal/ujson/\302\265json_test.go" "b/internal/json/ujson/\302\265json_test.go" similarity index 100% rename from "internal/ujson/\302\265json_test.go" rename to "internal/json/ujson/\302\265json_test.go" diff --git a/internal/service/fms/managed_service_data.go b/internal/service/fms/managed_service_data.go index dfe0409ad02..462b9ce8756 100644 --- a/internal/service/fms/managed_service_data.go +++ b/internal/service/fms/managed_service_data.go @@ -7,7 +7,7 @@ import ( "encoding/json" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-provider-aws/internal/ujson" + "github.com/hashicorp/terraform-provider-aws/internal/json/ujson" "github.com/hashicorp/terraform-provider-aws/internal/verify" ) From 71251ea5cf3db04440179301a21f877268e0a3ab Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 14 Dec 2023 08:20:22 -0500 Subject: [PATCH 174/438] Add 'json.RemoveReadOnlyFields'. 
--- internal/json/remove.go | 40 ++++++++++++++++++++++++++++++++ internal/json/remove_test.go | 45 ++++++++++++++++++++++++++++++++++++ 2 files changed, 85 insertions(+) create mode 100644 internal/json/remove_test.go diff --git a/internal/json/remove.go b/internal/json/remove.go index 6d271c22339..cd22e993b2a 100644 --- a/internal/json/remove.go +++ b/internal/json/remove.go @@ -2,3 +2,43 @@ // SPDX-License-Identifier: MPL-2.0 package json + +import ( + "bytes" + + "github.com/hashicorp/terraform-provider-aws/internal/json/ujson" +) + +// RemoveReadOnlyFields removes read-only (can't be specified in configuration) fields from a valid JSON string. +func RemoveReadOnlyFields(in string, roFields ...string) string { + out := make([]byte, 0, len(in)) + + err := ujson.Walk([]byte(in), func(_ int, key, value []byte) bool { + if len(key) != 0 { + for _, roField := range roFields { + if bytes.Equal(key, []byte(roField)) { + // Remove the key and value from the output. + return false + } + } + } + + // Write to output. + if len(out) != 0 && ujson.ShouldAddComma(value, out[len(out)-1]) { + out = append(out, ',') + } + if len(key) > 0 { + out = append(out, key...) + out = append(out, ':') + } + out = append(out, value...) + + return true + }) + + if err != nil { + return "" + } + + return string(out) +} diff --git a/internal/json/remove_test.go b/internal/json/remove_test.go new file mode 100644 index 00000000000..8c5bc527251 --- /dev/null +++ b/internal/json/remove_test.go @@ -0,0 +1,45 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package json + +import ( + "testing" +) + +func TestRemoveReadOnlyFields(t *testing.T) { + t.Parallel() + + testCases := []struct { + testName string + input string + want string + }{ + { + testName: "empty JSON", + input: "{}", + want: "{}", + }, + { + testName: "single field", + input: `{ "key": 42 }`, + want: `{"key":42}`, + }, + { + testName: "with read-only field", + input: "{\"unifiedAlerting\": {\"enabled\": true}, \"plugins\": {\"pluginAdminEnabled\" :false}}", + want: "{\"unifiedAlerting\":{\"enabled\":true}}", + }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.testName, func(t *testing.T) { + t.Parallel() + + if got, want := RemoveReadOnlyFields(testCase.input, `"plugins"`), testCase.want; got != want { + t.Errorf("RemoveReadOnlyFields(%q) = %q, want %q", testCase.input, got, want) + } + }) + } +} From 6ce0b892f04df891f11eccc355d91625e7849480 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 14 Dec 2023 08:32:54 -0500 Subject: [PATCH 175/438] Add 'verify.SuppressEquivalentJSONRemovingReadOnlyFieldsDiffs'. 
--- internal/verify/json.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/internal/verify/json.go b/internal/verify/json.go index 27aa395ffc1..1439938560f 100644 --- a/internal/verify/json.go +++ b/internal/verify/json.go @@ -15,6 +15,7 @@ import ( awspolicy "github.com/hashicorp/awspolicyequivalence" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" + tfjson "github.com/hashicorp/terraform-provider-aws/internal/json" ) // SuppressEquivalentPolicyDiffs returns a difference suppression function that compares @@ -208,3 +209,18 @@ func LegacyPolicyToSet(exist, new string) (string, error) { return policyToSet, nil } + +// SuppressEquivalentJSONRemovingReadOnlyFieldsDiffs returns a difference suppression function that compares +// two JSON strings and returns `true` if they are equivalent once read-only fields have been removed. +// Read-only fields are those that can't be specified in configuration (returned only from AWS API). +func SuppressEquivalentJSONRemovingReadOnlyFieldsDiffs(roFields ...string) schema.SchemaDiffSuppressFunc { + return func(k, old, new string, d *schema.ResourceData) bool { + if !json.Valid([]byte(old)) || !json.Valid([]byte(new)) { + return old == new + } + + old, new = tfjson.RemoveReadOnlyFields(old, roFields...), tfjson.RemoveReadOnlyFields(new, roFields...) + + return JSONStringsEqual(old, new) + } +} From 44c774e002a1a9ee09b75403c978e931665345d6 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 14 Dec 2023 08:38:25 -0500 Subject: [PATCH 176/438] r/aws_dms_replication_config: Use 'verify.SuppressEquivalentJSONRemovingReadOnlyFieldsDiffs'. 
--- internal/service/dms/replication_config.go | 57 +++++++--------------- 1 file changed, 17 insertions(+), 40 deletions(-) diff --git a/internal/service/dms/replication_config.go b/internal/service/dms/replication_config.go index 83cb89640f6..c5a8b15bf7d 100644 --- a/internal/service/dms/replication_config.go +++ b/internal/service/dms/replication_config.go @@ -5,7 +5,6 @@ package dms import ( "context" - "encoding/json" "fmt" "log" "time" @@ -16,6 +15,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" @@ -113,11 +113,21 @@ func ResourceReplicationConfig() *schema.Resource { ForceNew: true, }, "replication_settings": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringIsJSON, - DiffSuppressFunc: verify.SuppressEquivalentJSONDiffs, + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringIsJSON, + DiffSuppressFunc: verify.SuppressEquivalentJSONRemovingReadOnlyFieldsDiffs( + `"historyTimeslotInMinutes"`, + `"EnableLogContext"`, + `"CloudWatchLogGroup"`, + `"CloudWatchLogStream"`, + ), + DiffSuppressOnRefresh: true, + StateFunc: func(v interface{}) string { + json, _ := structure.NormalizeJsonString(v) + return json + }, }, "replication_type": { Type: schema.TypeString, @@ -233,19 +243,13 @@ func resourceReplicationConfigRead(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "setting compute_config: %s", err) } d.Set("replication_config_identifier", replicationConfig.ReplicationConfigIdentifier) + d.Set("replication_settings", replicationConfig.ReplicationSettings) 
d.Set("replication_type", replicationConfig.ReplicationType) d.Set("source_endpoint_arn", replicationConfig.SourceEndpointArn) d.Set("supplemental_settings", replicationConfig.SupplementalSettings) d.Set("table_mappings", replicationConfig.TableMappings) d.Set("target_endpoint_arn", replicationConfig.TargetEndpointArn) - settings, err := replicationConfigRemoveReadOnlySettings(aws.StringValue(replicationConfig.ReplicationSettings)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading DMS Replication Config (%s): %s", d.Id(), err) - } - - d.Set("replication_settings", settings) - return diags } @@ -664,30 +668,3 @@ func expandComputeConfigInput(tfMap map[string]interface{}) *dms.ComputeConfig { return apiObject } - -func replicationConfigRemoveReadOnlySettings(settings string) (*string, error) { - var settingsData map[string]interface{} - if err := json.Unmarshal([]byte(settings), &settingsData); err != nil { - return nil, err - } - - controlTablesSettings, ok := settingsData["ControlTablesSettings"].(map[string]interface{}) - if ok { - delete(controlTablesSettings, "historyTimeslotInMinutes") - } - - logging, ok := settingsData["Logging"].(map[string]interface{}) - if ok { - delete(logging, "EnableLogContext") - delete(logging, "CloudWatchLogGroup") - delete(logging, "CloudWatchLogStream") - } - - cleanedSettings, err := json.Marshal(settingsData) - if err != nil { - return nil, err - } - - cleanedSettingsString := string(cleanedSettings) - return &cleanedSettingsString, nil -} From de5e8e5487d3f5a4fbbf6ceb102006eca42be4c7 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:17:57 -0500 Subject: [PATCH 177/438] d/aws_polly_voices: new data source (#34916) This data source will allow practitioners to list available voices from AWS Polly. 
--- .changelog/34916.txt | 3 + internal/service/polly/service_package_gen.go | 7 +- internal/service/polly/voices_data_source.go | 153 ++++++++++++++++++ .../service/polly/voices_data_source_test.go | 84 ++++++++++ names/names.go | 1 + website/docs/d/polly_voices.html.markdown | 54 +++++++ 6 files changed, 301 insertions(+), 1 deletion(-) create mode 100644 .changelog/34916.txt create mode 100644 internal/service/polly/voices_data_source.go create mode 100644 internal/service/polly/voices_data_source_test.go create mode 100644 website/docs/d/polly_voices.html.markdown diff --git a/.changelog/34916.txt b/.changelog/34916.txt new file mode 100644 index 00000000000..9c96cfacdd4 --- /dev/null +++ b/.changelog/34916.txt @@ -0,0 +1,3 @@ +```release-note:new-data-source +aws_polly_voices +``` diff --git a/internal/service/polly/service_package_gen.go b/internal/service/polly/service_package_gen.go index 43fe19efee8..4391a3cc545 100644 --- a/internal/service/polly/service_package_gen.go +++ b/internal/service/polly/service_package_gen.go @@ -15,7 +15,12 @@ import ( type servicePackage struct{} func (p *servicePackage) FrameworkDataSources(ctx context.Context) []*types.ServicePackageFrameworkDataSource { - return []*types.ServicePackageFrameworkDataSource{} + return []*types.ServicePackageFrameworkDataSource{ + { + Factory: newDataSourceVoices, + Name: "Voices", + }, + } } func (p *servicePackage) FrameworkResources(ctx context.Context) []*types.ServicePackageFrameworkResource { diff --git a/internal/service/polly/voices_data_source.go b/internal/service/polly/voices_data_source.go new file mode 100644 index 00000000000..1274c5ba585 --- /dev/null +++ b/internal/service/polly/voices_data_source.go @@ -0,0 +1,153 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package polly + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/service/polly" + awstypes "github.com/aws/aws-sdk-go-v2/service/polly/types" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/datasource/schema" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/framework" + "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" + fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" + "github.com/hashicorp/terraform-provider-aws/names" +) + +// @FrameworkDataSource(name="Voices") +func newDataSourceVoices(context.Context) (datasource.DataSourceWithConfigure, error) { + return &dataSourceVoices{}, nil +} + +const ( + DSNameVoices = "Voices Data Source" +) + +type dataSourceVoices struct { + framework.DataSourceWithConfigure +} + +func (d *dataSourceVoices) Metadata(_ context.Context, req datasource.MetadataRequest, resp *datasource.MetadataResponse) { // nosemgrep:ci.meta-in-func-name + resp.TypeName = "aws_polly_voices" +} + +func (d *dataSourceVoices) Schema(ctx context.Context, req datasource.SchemaRequest, resp *datasource.SchemaResponse) { + resp.Schema = schema.Schema{ + Attributes: map[string]schema.Attribute{ + "engine": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.Engine](), + Optional: true, + }, + "id": framework.IDAttribute(), + "include_additional_language_codes": schema.BoolAttribute{ + Optional: true, + }, + "language_code": schema.StringAttribute{ + CustomType: fwtypes.StringEnumType[awstypes.LanguageCode](), + Optional: true, + }, + }, + Blocks: map[string]schema.Block{ + "voices": schema.ListNestedBlock{ + CustomType: fwtypes.NewListNestedObjectTypeOf[voicesData](ctx), + NestedObject: schema.NestedBlockObject{ + Attributes: 
map[string]schema.Attribute{ + "additional_language_codes": schema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Computed: true, + }, + "gender": schema.StringAttribute{ + Computed: true, + }, + "id": schema.StringAttribute{ + Computed: true, + }, + "language_code": schema.StringAttribute{ + Computed: true, + }, + "language_name": schema.StringAttribute{ + Computed: true, + }, + "name": schema.StringAttribute{ + Computed: true, + }, + "supported_engines": schema.ListAttribute{ + CustomType: fwtypes.ListOfStringType, + ElementType: types.StringType, + Computed: true, + }, + }, + }, + }, + }, + } +} +func (d *dataSourceVoices) Read(ctx context.Context, req datasource.ReadRequest, resp *datasource.ReadResponse) { + conn := d.Meta().PollyClient(ctx) + + var data dataSourceVoicesData + resp.Diagnostics.Append(req.Config.Get(ctx, &data)...) + if resp.Diagnostics.HasError() { + return + } + data.ID = types.StringValue(d.Meta().AccountID) + + input := &polly.DescribeVoicesInput{} + resp.Diagnostics.Append(flex.Expand(ctx, data, input)...) + if resp.Diagnostics.HasError() { + return + } + + // No paginator helper so pagination must be done manually + out := &polly.DescribeVoicesOutput{} + for { + page, err := conn.DescribeVoices(ctx, input) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.Polly, create.ErrActionReading, DSNameVoices, data.ID.String(), err), + err.Error(), + ) + return + } + + if page == nil { + break + } + + if len(page.Voices) > 0 { + out.Voices = append(out.Voices, page.Voices...) + } + + input.NextToken = page.NextToken + if page.NextToken == nil { + break + } + } + + resp.Diagnostics.Append(flex.Flatten(ctx, out, &data)...) + + resp.Diagnostics.Append(resp.State.Set(ctx, &data)...) 
+} + +type dataSourceVoicesData struct { + Engine fwtypes.StringEnum[awstypes.Engine] `tfsdk:"engine"` + ID types.String `tfsdk:"id"` + IncludeAdditionalLanguageCodes types.Bool `tfsdk:"include_additional_language_codes"` + LanguageCode fwtypes.StringEnum[awstypes.LanguageCode] `tfsdk:"language_code"` + Voices fwtypes.ListNestedObjectValueOf[voicesData] `tfsdk:"voices"` +} + +type voicesData struct { + AdditionalLanguageCodes fwtypes.ListValueOf[types.String] `tfsdk:"additional_language_codes"` + Gender types.String `tfsdk:"gender"` + ID types.String `tfsdk:"id"` + LanguageCode types.String `tfsdk:"language_code"` + LanguageName types.String `tfsdk:"language_name"` + Name types.String `tfsdk:"name"` + SupportedEngines fwtypes.ListValueOf[types.String] `tfsdk:"supported_engines"` +} diff --git a/internal/service/polly/voices_data_source_test.go b/internal/service/polly/voices_data_source_test.go new file mode 100644 index 00000000000..21eda739a7a --- /dev/null +++ b/internal/service/polly/voices_data_source_test.go @@ -0,0 +1,84 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package polly_test + +import ( + "fmt" + "testing" + + "github.com/aws/aws-sdk-go-v2/service/polly/types" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/names" +) + +func TestAccPollyVoicesDataSource_basic(t *testing.T) { + ctx := acctest.Context(t) + dataSourceName := "data.aws_polly_voices.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PollyEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PollyEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: testAccVoicesDataSourceConfig_basic(), + Check: resource.ComposeTestCheckFunc( + // verify a known voice is returned in the results + resource.TestCheckTypeSetElemNestedAttrs(dataSourceName, "voices.*", map[string]string{ + "gender": "Female", + "language_code": "en-US", + "name": "Kendra", + }), + ), + }, + }, + }) +} + +func TestAccPollyVoicesDataSource_languageCode(t *testing.T) { + ctx := acctest.Context(t) + dataSourceName := "data.aws_polly_voices.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.PollyEndpointID) + }, + ErrorCheck: acctest.ErrorCheck(t, names.PollyEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: nil, + Steps: []resource.TestStep{ + { + Config: testAccVoicesDataSourceConfig_languageCode(string(types.LanguageCodeEnUs)), + Check: resource.ComposeTestCheckFunc( + // verify a known voice is returned in the results + resource.TestCheckTypeSetElemNestedAttrs(dataSourceName, "voices.*", map[string]string{ + "gender": "Female", + "language_code": "en-US", + "name": "Kendra", + }), + ), + }, + }, + }) +} + 
+func testAccVoicesDataSourceConfig_basic() string { + return ` +data "aws_polly_voices" "test" {} +` +} + +func testAccVoicesDataSourceConfig_languageCode(languageCode string) string { + return fmt.Sprintf(` +data "aws_polly_voices" "test" { + language_code = %[1]q +} +`, languageCode) +} diff --git a/names/names.go b/names/names.go index 7eaf6c896fe..30dd6320c9d 100644 --- a/names/names.go +++ b/names/names.go @@ -66,6 +66,7 @@ const ( ObservabilityAccessManagerEndpointID = "oam" OpenSearchServerlessEndpointID = "aoss" PipesEndpointID = "pipes" + PollyEndpointID = "polly" PricingEndpointID = "pricing" QLDBEndpointID = "qldb" RedshiftDataEndpointID = "redshift-data" diff --git a/website/docs/d/polly_voices.html.markdown b/website/docs/d/polly_voices.html.markdown new file mode 100644 index 00000000000..d9f73c2573a --- /dev/null +++ b/website/docs/d/polly_voices.html.markdown @@ -0,0 +1,54 @@ +--- +subcategory: "Polly" +layout: "aws" +page_title: "AWS: aws_polly_voices" +description: |- + Terraform data source for managing an AWS Polly Voices. +--- + +# Data Source: aws_polly_voices + +Terraform data source for managing an AWS Polly Voices. + +## Example Usage + +### Basic Usage + +```terraform +data "aws_polly_voices" "example" {} +``` + +### With Language Code + +```terraform +data "aws_polly_voices" "example" { + language_code = "en-GB" +} +``` + +## Argument Reference + +The following arguments are optional: + +* `engine` - (Optional) Engine used by Amazon Polly when processing input text for speech synthesis. Valid values are `standard`, `neural`, and `long-form`. +* `include_additional_language_codes` - (Optional) Whether to return any bilingual voices that use the specified language as an additional language. +* `language_code` - (Optional) Language identification tag for filtering the list of voices returned. If not specified, all available voices are returned. 
+ +## Attribute Reference + +This data source exports the following attributes in addition to the arguments above: + +* `id` - AWS account ID. +* `voices` - List of voices with their properties. See [`voices` Attribute Reference](#voices-attribute-reference) below. + +### `voices` Attribute Reference + +See the [AWS Polly Voice documentation](https://docs.aws.amazon.com/polly/latest/dg/API_Voice.html) for additional details. + +* `additional_language_codes` - Additional codes for languages available for the specified voice in addition to its default language. +* `gender` - Gender of the voice. +* `id` - Amazon Polly assigned voice ID. +* `language_code` - Language code of the voice. +* `language_name` - Human readable name of the language in English. +* `name` - Name of the voice. +* `supported_engines` - Specifies which engines are supported by a given voice. From e7a637364376983820c5df30a5377cf94cbf18cf Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 14 Dec 2023 14:20:29 +0000 Subject: [PATCH 178/438] Update CHANGELOG.md for #34916 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 65f9cccaf94..fce62da4221 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ FEATURES: +* **New Data Source:** `aws_polly_voices` ([#34916](https://github.com/hashicorp/terraform-provider-aws/issues/34916)) * **New Data Source:** `aws_ssoadmin_application_assignments` ([#34796](https://github.com/hashicorp/terraform-provider-aws/issues/34796)) * **New Data Source:** `aws_ssoadmin_principal_application_assignments` ([#34815](https://github.com/hashicorp/terraform-provider-aws/issues/34815)) * **New Resource:** `aws_ssoadmin_trusted_token_issuer` ([#34839](https://github.com/hashicorp/terraform-provider-aws/issues/34839)) From b1db62e1f396f61a056e39583a0eeb6f113f42b0 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:25:10 -0500 Subject: [PATCH 179/438] r/aws_finspace_kx_scaling_group(test): add _tags --- 
.../service/finspace/kx_scaling_group_test.go | 91 +++++++++++++++++++ 1 file changed, 91 insertions(+) diff --git a/internal/service/finspace/kx_scaling_group_test.go b/internal/service/finspace/kx_scaling_group_test.go index 6ef696fda1e..b3ee1407893 100644 --- a/internal/service/finspace/kx_scaling_group_test.go +++ b/internal/service/finspace/kx_scaling_group_test.go @@ -88,6 +88,62 @@ func TestAccFinSpaceKxScalingGroup_disappears(t *testing.T) { }) } +func TestAccFinSpaceKxScalingGroup_tags(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var scalingGroup finspace.GetKxScalingGroupOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_scaling_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxScalingGroupDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxScalingGroupConfig_tags1(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxScalingGroupExists(ctx, resourceName, &scalingGroup), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccKxScalingGroupConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxScalingGroupExists(ctx, resourceName, &scalingGroup), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + 
resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccKxScalingGroupConfig_tags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxScalingGroupExists(ctx, resourceName, &scalingGroup), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + func testAccCheckKxScalingGroupDestroy(ctx context.Context) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) @@ -263,3 +319,38 @@ resource "aws_finspace_kx_scaling_group" "test" { } `, rName)) } + +func testAccKxScalingGroupConfig_tags1(rName, key1, value1 string) string { + return acctest.ConfigCompose( + testAccKxScalingGroupConfigBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_scaling_group" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + host_type = "kx.sg.4xlarge" + + tags = { + %[2]q = %[3]q + } +} +`, rName, key1, value1)) +} + +func testAccKxScalingGroupConfig_tags2(rName, key1, value1, key2, value2 string) string { + return acctest.ConfigCompose( + testAccKxScalingGroupConfigBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_scaling_group" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + host_type = "kx.sg.4xlarge" + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, key1, value1, key2, value2)) +} From 96399e660de24c132af31bf300cd36dd13b9e910 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:35:17 -0500 Subject: [PATCH 180/438] r/aws_finspace_kx_volume: 
prefer create.AppendDiagError --- internal/service/finspace/kx_volume.go | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/internal/service/finspace/kx_volume.go b/internal/service/finspace/kx_volume.go index 0edbc919ffc..c76aa3b1f7b 100644 --- a/internal/service/finspace/kx_volume.go +++ b/internal/service/finspace/kx_volume.go @@ -177,7 +177,7 @@ func resourceKxVolumeCreate(ctx context.Context, d *schema.ResourceData, meta in } rID, err := flex.FlattenResourceId(idParts, kxVolumeIDPartCount, false) if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionFlatteningResourceId, ResNameKxVolume, d.Get("name").(string), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionFlatteningResourceId, ResNameKxVolume, d.Get("name").(string), err) } d.SetId(rID) @@ -203,21 +203,21 @@ func resourceKxVolumeCreate(ctx context.Context, d *schema.ResourceData, meta in out, err := conn.CreateKxVolume(ctx, in) if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Get("name").(string), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Get("name").(string), err) } if out == nil || out.VolumeName == nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Get("name").(string), errors.New("empty output"))...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Get("name").(string), errors.New("empty output")) } if _, err := waitKxVolumeCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxVolume, d.Id(), err)...) 
+ return create.AppendDiagError(diags, names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxVolume, d.Id(), err) } // The CreateKxVolume API currently fails to tag the Volume when the // Tags field is set. Until the API is fixed, tag after creation instead. if err := createTags(ctx, conn, aws.ToString(out.VolumeArn), getTagsIn(ctx)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Id(), err) } return append(diags, resourceKxVolumeRead(ctx, d, meta)...) @@ -236,7 +236,7 @@ func resourceKxVolumeRead(ctx context.Context, d *schema.ResourceData, meta inte } if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionReading, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionReading, ResNameKxVolume, d.Id(), err) } d.Set("arn", out.VolumeArn) @@ -252,16 +252,16 @@ func resourceKxVolumeRead(ctx context.Context, d *schema.ResourceData, meta inte d.Set("availability_zones", aws.StringSlice(out.AvailabilityZoneIds)) if err := d.Set("nas1_configuration", flattenNas1Configuration(out.Nas1Configuration)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err) } if err := d.Set("attached_clusters", flattenAttachedClusters(out.AttachedClusters)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err)...) 
+ return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err) } parts, err := flex.ExpandResourceId(d.Id(), kxVolumeIDPartCount, false) if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxVolume, d.Id(), err) } d.Set("environment_id", parts[0]) @@ -296,10 +296,10 @@ func resourceKxVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta in log.Printf("[DEBUG] Updating FinSpace KxVolume (%s): %#v", d.Id(), in) if _, err := conn.UpdateKxVolume(ctx, in); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionUpdating, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionUpdating, ResNameKxVolume, d.Id(), err) } if _, err := waitKxVolumeUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionUpdating, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionUpdating, ResNameKxVolume, d.Id(), err) } return append(diags, resourceKxVolumeRead(ctx, d, meta)...) @@ -321,12 +321,12 @@ func resourceKxVolumeDelete(ctx context.Context, d *schema.ResourceData, meta in return diags } - return append(diags, create.DiagError(names.FinSpace, create.ErrActionDeleting, ResNameKxVolume, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionDeleting, ResNameKxVolume, d.Id(), err) } _, err = waitKxVolumeDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)) if err != nil && !tfresource.NotFound(err) { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForDeletion, ResNameKxVolume, d.Id(), err)...) 
+ return create.AppendDiagError(diags, names.FinSpace, create.ErrActionWaitingForDeletion, ResNameKxVolume, d.Id(), err) } return diags From 58ea0a7f9bcfe3225cd19b90a17e96608a7c8de9 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:39:11 -0500 Subject: [PATCH 181/438] r/aws_finspace_kx_volume: alphabetize attributes, fix conn init --- internal/service/finspace/kx_volume.go | 118 ++++++++++++------------- 1 file changed, 57 insertions(+), 61 deletions(-) diff --git a/internal/service/finspace/kx_volume.go b/internal/service/finspace/kx_volume.go index c76aa3b1f7b..c550a4ead2f 100644 --- a/internal/service/finspace/kx_volume.go +++ b/internal/service/finspace/kx_volume.go @@ -16,7 +16,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/enum" @@ -47,7 +46,36 @@ func ResourceKxVolume() *schema.Resource { }, Schema: map[string]*schema.Schema{ - + "arn": { + Type: schema.TypeString, + Computed: true, + }, + "attached_clusters": { + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "cluster_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "cluster_status": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxClusterStatus](), + }, + "cluster_type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxClusterType](), + }, + }, + }, + Computed: true, + }, "availability_zones": { Type: schema.TypeList, Elem: &schema.Schema{ @@ -56,39 +84,31 @@ func 
ResourceKxVolume() *schema.Resource { Required: true, ForceNew: true, }, - "arn": { - Type: schema.TypeString, - Computed: true, - }, "az_mode": { Type: schema.TypeString, Required: true, ForceNew: true, ValidateDiagFunc: enum.Validate[types.KxAzMode](), }, - "environment_id": { + "created_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "description": { Type: schema.TypeString, - Required: true, + Optional: true, ForceNew: true, - ValidateFunc: validation.StringLenBetween(1, 32), + ValidateFunc: validation.StringLenBetween(1, 1000), }, - "name": { + "environment_id": { Type: schema.TypeString, Required: true, ForceNew: true, - ValidateFunc: validation.StringLenBetween(3, 63), - }, - "type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateDiagFunc: enum.Validate[types.KxVolumeType](), + ValidateFunc: validation.StringLenBetween(1, 32), }, - "description": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringLenBetween(1, 1000), + "last_modified_timestamp": { + Type: schema.TypeString, + Computed: true, }, "nas1_configuration": { Type: schema.TypeList, @@ -111,13 +131,11 @@ func ResourceKxVolume() *schema.Resource { }, }, }, - "created_timestamp": { - Type: schema.TypeString, - Computed: true, - }, - "last_modified_timestamp": { - Type: schema.TypeString, - Computed: true, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), }, "status": { Type: schema.TypeString, @@ -127,34 +145,14 @@ func ResourceKxVolume() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "attached_clusters": { - Type: schema.TypeList, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "cluster_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringLenBetween(3, 63), - }, - "cluster_status": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - 
ValidateDiagFunc: enum.Validate[types.KxClusterStatus](), - }, - "cluster_type": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateDiagFunc: enum.Validate[types.KxClusterType](), - }, - }, - }, - Computed: true, - }, names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), + "type": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateDiagFunc: enum.Validate[types.KxVolumeType](), + }, }, CustomizeDiff: verify.SetTagsDiff, } @@ -167,7 +165,7 @@ const ( func resourceKxVolumeCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) environmentId := d.Get("environment_id").(string) volumeName := d.Get("name").(string) @@ -199,8 +197,6 @@ func resourceKxVolumeCreate(ctx context.Context, d *schema.ResourceData, meta in in.Nas1Configuration = expandNas1Configuration(v.([]interface{})) } - // TODO: add flatten/expand functions for remaining parameters - out, err := conn.CreateKxVolume(ctx, in) if err != nil { return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxVolume, d.Get("name").(string), err) @@ -225,7 +221,7 @@ func resourceKxVolumeCreate(ctx context.Context, d *schema.ResourceData, meta in func resourceKxVolumeRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) out, err := findKxVolumeByID(ctx, conn, d.Id()) @@ -270,7 +266,7 @@ func resourceKxVolumeRead(ctx context.Context, d *schema.ResourceData, meta inte func resourceKxVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := 
acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) updateVolume := false @@ -307,7 +303,7 @@ func resourceKxVolumeUpdate(ctx context.Context, d *schema.ResourceData, meta in func resourceKxVolumeDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) log.Printf("[INFO] Deleting FinSpace Kx Volume: %s", d.Id()) _, err := conn.DeleteKxVolume(ctx, &finspace.DeleteKxVolumeInput{ From cecc3f2886187882451e0aaf9765354e6e6b20b1 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:41:28 -0500 Subject: [PATCH 182/438] r/aws_finspace_kx_volume(test): fix disappears test name --- internal/service/finspace/kx_volume_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/finspace/kx_volume_test.go b/internal/service/finspace/kx_volume_test.go index 52008f1e25e..84dade247bd 100644 --- a/internal/service/finspace/kx_volume_test.go +++ b/internal/service/finspace/kx_volume_test.go @@ -58,7 +58,7 @@ func TestAccFinSpaceKxVolume_basic(t *testing.T) { }) } -func TestAccFinSpaceKxVolume_dissappears(t *testing.T) { +func TestAccFinSpaceKxVolume_disappears(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } From fbb6fd67133021384ac24fba1b042514ca61ef5c Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:45:40 -0500 Subject: [PATCH 183/438] r/aws_finspace_kx_volume(test): use find in test check func --- internal/service/finspace/kx_volume.go | 6 +- internal/service/finspace/kx_volume_test.go | 66 +++++++++------------ 2 files changed, 32 insertions(+), 40 deletions(-) diff --git a/internal/service/finspace/kx_volume.go b/internal/service/finspace/kx_volume.go index c550a4ead2f..72bad740dcc 100644 --- 
a/internal/service/finspace/kx_volume.go +++ b/internal/service/finspace/kx_volume.go @@ -223,7 +223,7 @@ func resourceKxVolumeRead(ctx context.Context, d *schema.ResourceData, meta inte var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) - out, err := findKxVolumeByID(ctx, conn, d.Id()) + out, err := FindKxVolumeByID(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] FinSpace KxVolume (%s) not found, removing from state", d.Id()) @@ -382,7 +382,7 @@ func waitKxVolumeDeleted(ctx context.Context, conn *finspace.Client, id string, func statusKxVolume(ctx context.Context, conn *finspace.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - out, err := findKxVolumeByID(ctx, conn, id) + out, err := FindKxVolumeByID(ctx, conn, id) if tfresource.NotFound(err) { return nil, "", nil } @@ -395,7 +395,7 @@ func statusKxVolume(ctx context.Context, conn *finspace.Client, id string) retry } } -func findKxVolumeByID(ctx context.Context, conn *finspace.Client, id string) (*finspace.GetKxVolumeOutput, error) { +func FindKxVolumeByID(ctx context.Context, conn *finspace.Client, id string) (*finspace.GetKxVolumeOutput, error) { parts, err := flex.ExpandResourceId(id, kxVolumeIDPartCount, false) if err != nil { return nil, err diff --git a/internal/service/finspace/kx_volume_test.go b/internal/service/finspace/kx_volume_test.go index 84dade247bd..b1af01ddd14 100644 --- a/internal/service/finspace/kx_volume_test.go +++ b/internal/service/finspace/kx_volume_test.go @@ -9,7 +9,6 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/finspace" "github.com/aws/aws-sdk-go-v2/service/finspace/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" @@ -28,7 +27,7 @@ func TestAccFinSpaceKxVolume_basic(t *testing.T) { } ctx := acctest.Context(t) - var KxVolume finspace.GetKxVolumeOutput + var volume 
finspace.GetKxVolumeOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_finspace_kx_volume.test" @@ -44,7 +43,7 @@ func TestAccFinSpaceKxVolume_basic(t *testing.T) { { Config: testAccKxVolumeConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckKxVolumeExists(ctx, resourceName, &KxVolume), + testAccCheckKxVolumeExists(ctx, resourceName, &volume), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "status", string(types.KxVolumeStatusActive)), ), @@ -64,7 +63,7 @@ func TestAccFinSpaceKxVolume_disappears(t *testing.T) { } ctx := acctest.Context(t) - var KxVolume finspace.GetKxVolumeOutput + var volume finspace.GetKxVolumeOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_finspace_kx_volume.test" @@ -80,7 +79,7 @@ func TestAccFinSpaceKxVolume_disappears(t *testing.T) { { Config: testAccKxVolumeConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckKxVolumeExists(ctx, resourceName, &KxVolume), + testAccCheckKxVolumeExists(ctx, resourceName, &volume), acctest.CheckResourceDisappears(ctx, acctest.Provider, tffinspace.ResourceKxVolume(), resourceName), ), ExpectNonEmptyPlan: true, @@ -98,11 +97,7 @@ func testAccCheckKxVolumeDestroy(ctx context.Context) resource.TestCheckFunc { continue } - input := &finspace.GetKxVolumeInput{ - VolumeName: aws.String(rs.Primary.Attributes["name"]), - EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), - } - _, err := conn.GetKxVolume(ctx, input) + _, err := tffinspace.FindKxVolumeByID(ctx, conn, rs.Primary.ID) if err != nil { var nfe *types.ResourceNotFoundException if errors.As(err, &nfe) { @@ -118,6 +113,30 @@ func testAccCheckKxVolumeDestroy(ctx context.Context) resource.TestCheckFunc { } } +func testAccCheckKxVolumeExists(ctx context.Context, name string, volume *finspace.GetKxVolumeOutput) resource.TestCheckFunc { + return func(s *terraform.State) error 
{ + rs, ok := s.RootModule().Resources[name] + if !ok { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, name, errors.New("not found")) + } + + if rs.Primary.ID == "" { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, name, errors.New("not set")) + } + + conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) + + resp, err := tffinspace.FindKxVolumeByID(ctx, conn, rs.Primary.ID) + if err != nil { + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, rs.Primary.ID, err) + } + + *volume = *resp + + return nil + } +} + func testAccKxVolumeConfig_basic(rName string) string { return acctest.ConfigCompose( testAccKxVolumeConfigBase(rName), @@ -249,30 +268,3 @@ resource "aws_route" "r" { } `, rName) } - -func testAccCheckKxVolumeExists(ctx context.Context, name string, KxVolume *finspace.GetKxVolumeOutput) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[name] - if !ok { - return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, name, errors.New("not found")) - } - - if rs.Primary.ID == "" { - return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, name, errors.New("not set")) - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) - resp, err := conn.GetKxVolume(ctx, &finspace.GetKxVolumeInput{ - VolumeName: aws.String(rs.Primary.Attributes["name"]), - EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), - }) - - if err != nil { - return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxVolume, rs.Primary.ID, err) - } - - *KxVolume = *resp - - return nil - } -} From 102a7f8f2952d300258214932eb5b02d25d4928e Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:46:41 -0500 Subject: 
[PATCH 184/438] chore: changelog --- .changelog/34832.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/34832.txt diff --git a/.changelog/34832.txt b/.changelog/34832.txt new file mode 100644 index 00000000000..0794acc706f --- /dev/null +++ b/.changelog/34832.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_finspace_kx_scaling_group +``` From 08fcc0b0f79b45dc1471fd9082b1b83deb8cf16c Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:47:28 -0500 Subject: [PATCH 185/438] chore: changelog --- .changelog/34833.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/34833.txt diff --git a/.changelog/34833.txt b/.changelog/34833.txt new file mode 100644 index 00000000000..e1e350824ea --- /dev/null +++ b/.changelog/34833.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_finspace_kx_volume +``` From 5a7600eda4cce5c6c3f5584c3fe26dbbc534424e Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 09:59:52 -0500 Subject: [PATCH 186/438] r/aws_finspace_kx_volume(test): fmt config --- internal/service/finspace/kx_volume_test.go | 126 ++++++++++---------- 1 file changed, 61 insertions(+), 65 deletions(-) diff --git a/internal/service/finspace/kx_volume_test.go b/internal/service/finspace/kx_volume_test.go index b1af01ddd14..616261e5097 100644 --- a/internal/service/finspace/kx_volume_test.go +++ b/internal/service/finspace/kx_volume_test.go @@ -137,32 +137,10 @@ func testAccCheckKxVolumeExists(ctx context.Context, name string, volume *finspa } } -func testAccKxVolumeConfig_basic(rName string) string { - return acctest.ConfigCompose( - testAccKxVolumeConfigBase(rName), - fmt.Sprintf(` -resource "aws_finspace_kx_volume" "test" { - name = %[1]q - environment_id = aws_finspace_kx_environment.test.id - availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] - az_mode = "SINGLE" - type = "NAS_1" - nas1_configuration { - type= "SSD_250" - size= 1200 - } -} -`, rName)) -} - func 
testAccKxVolumeConfigBase(rName string) string { return fmt.Sprintf(` data "aws_caller_identity" "current" {} data "aws_partition" "current" {} - -output "account_id" { - value = data.aws_caller_identity.current.account_id -} resource "aws_kms_key" "test" { deletion_window_in_days = 7 @@ -175,49 +153,49 @@ resource "aws_finspace_kx_environment" "test" { data "aws_iam_policy_document" "key_policy" { statement { - actions = [ - "kms:Decrypt", - "kms:GenerateDataKey" - ] - - resources = [ - aws_kms_key.test.arn, - ] - - principals { - type = "Service" - identifiers = ["finspace.amazonaws.com"] - } - - condition { - test = "ArnLike" - variable = "aws:SourceArn" - values = ["${aws_finspace_kx_environment.test.arn}/*"] - } - - condition { - test = "StringEquals" - variable = "aws:SourceAccount" - values = [data.aws_caller_identity.current.account_id] - } + actions = [ + "kms:Decrypt", + "kms:GenerateDataKey" + ] + + resources = [ + aws_kms_key.test.arn, + ] + + principals { + type = "Service" + identifiers = ["finspace.amazonaws.com"] + } + + condition { + test = "ArnLike" + variable = "aws:SourceArn" + values = ["${aws_finspace_kx_environment.test.arn}/*"] + } + + condition { + test = "StringEquals" + variable = "aws:SourceAccount" + values = [data.aws_caller_identity.current.account_id] + } } statement { - actions = [ - "kms:*", - ] - + actions = [ + "kms:*", + ] + resources = [ - "*", - ] - - principals { - type = "AWS" - identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] - } + "*", + ] + + principals { + type = "AWS" + identifiers = ["arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:root"] + } } } - + resource "aws_kms_key_policy" "test" { key_id = aws_kms_key.test.id policy = data.aws_iam_policy_document.key_policy.json @@ -240,16 +218,16 @@ resource "aws_security_group" "test" { ingress { from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = 
["0.0.0.0/0"] + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] } egress { from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] } } @@ -268,3 +246,21 @@ resource "aws_route" "r" { } `, rName) } + +func testAccKxVolumeConfig_basic(rName string) string { + return acctest.ConfigCompose( + testAccKxVolumeConfigBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_volume" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type= "SSD_250" + size= 1200 + } +} +`, rName)) +} From 65bd94e709d92e7759709948a8d7043738510fb1 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 10:07:48 -0500 Subject: [PATCH 187/438] r/aws_finspace_kx_volume(doc): fmt config, tidy descriptions --- .../docs/r/finspace_kx_volume.html.markdown | 48 +++++++++---------- 1 file changed, 24 insertions(+), 24 deletions(-) diff --git a/website/docs/r/finspace_kx_volume.html.markdown b/website/docs/r/finspace_kx_volume.html.markdown index b573a81efdd..35e75bf3e32 100644 --- a/website/docs/r/finspace_kx_volume.html.markdown +++ b/website/docs/r/finspace_kx_volume.html.markdown @@ -16,15 +16,15 @@ Terraform resource for managing an AWS FinSpace Kx Volume. 
```terraform resource "aws_finspace_kx_volume" "example" { - name = "my-tf-kx-volume" - environment_id = aws_finspace_kx_environment.example.id - availability_zones = "use1-az2" - az_mode = "SINGLE" - type = "NAS_1" - nas1_configuration { - type= "SSD_250" - size= 1200 - } + name = "my-tf-kx-volume" + environment_id = aws_finspace_kx_environment.example.id + availability_zones = "use1-az2" + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + size= 1200 + type= "SSD_250" + } } ``` @@ -33,24 +33,24 @@ resource "aws_finspace_kx_volume" "example" { The following arguments are required: * `az_mode` - (Required) The number of availability zones you want to assign per volume. Currently, Finspace only support SINGLE for volumes. - * SINGLE - Assigns one availability zone per volume. + * `SINGLE` - Assigns one availability zone per volume. * `environment_id` - (Required) A unique identifier for the kdb environment, whose clusters can attach to the volume. * `name` - (Required) Unique name for the volumr that you want to create. -* `type` - (Required) The type of file system volume. Currently, FinSpace only supports NAS_1 volume type. When you select NAS_1 volume type, you must also provide nas1Configuration. +* `type` - (Required) The type of file system volume. Currently, FinSpace only supports the `NAS_1` volume type. When you select the `NAS_1` volume type, you must also provide `nas1_configuration`. * `availability_zones` - (Required) The identifier of the AWS Availability Zone IDs. The following arguments are optional: -* `nas1_configuration` - (Optional) Specifies the configuration for the Network attached storage (NAS_1) file system volume. This parameter is required when you choose volumeType as NAS_1. +* `nas1_configuration` - (Optional) Specifies the configuration for the Network attached storage (`NAS_1`) file system volume. This parameter is required when `volume_type` is `NAS_1`. 
See [`nas1_configuration` Argument Reference](#nas1_configuration-argument-reference) below. * `description` - (Optional) Description of the volume. * `tags` - (Optional) A list of key-value pairs to label the volume. You can add up to 50 tags to a volume -### nas1_configuration +### `nas1_configuration` Argument Reference -The nas1_configuration block supports the following arguments: +The `nas1_configuration` block supports the following arguments: * `size` - (Required) The size of the network attached storage. -* `security_group_ids` - (Required) The type of the network attached storage. +* `type` - (Required) The type of the network attached storage. ## Attribute Reference @@ -59,15 +59,15 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - Amazon Resource Name (ARN) identifier of the KX volume. * `created_timestamp` - The timestamp at which the volume was created in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. * `status` - The status of volume creation. - * CREATING – The volume creation is in progress. - * CREATE_FAILED – The volume creation has failed. - * ACTIVE – The volume is active. - * UPDATING – The volume is in the process of being updated. - * UPDATE_FAILED – The update action failed. - * UPDATED – The volume is successfully updated. - * DELETING – The volume is in the process of being deleted. - * DELETE_FAILED – The system failed to delete the volume. - * DELETED – The volume is successfully deleted. + * `CREATING` – The volume creation is in progress. + * `CREATE_FAILED` – The volume creation has failed. + * `ACTIVE` – The volume is active. + * `UPDATING` – The volume is in the process of being updated. + * `UPDATE_FAILED` – The update action failed. + * `UPDATED` – The volume is successfully updated. + * `DELETING` – The volume is in the process of being deleted. 
+ * `DELETE_FAILED` – The system failed to delete the volume. + * `DELETED` – The volume is successfully deleted. * `status_reason` - The error message when a failed state occurs. * `last_modified_timestamp` - Last timestamp at which the volume was updated in FinSpace. Value determined as epoch time in seconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000. From 46e58e6438388bf02468c1d887190766160999af Mon Sep 17 00:00:00 2001 From: David Hwang Date: Thu, 14 Dec 2023 10:08:51 -0500 Subject: [PATCH 188/438] Update internal/service/finspace/kx_cluster_test.go Co-authored-by: Jared Baker --- internal/service/finspace/kx_cluster_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/finspace/kx_cluster_test.go b/internal/service/finspace/kx_cluster_test.go index 16c5b53d1d6..8ca4504aad7 100644 --- a/internal/service/finspace/kx_cluster_test.go +++ b/internal/service/finspace/kx_cluster_test.go @@ -672,7 +672,7 @@ func TestAccFinSpaceKxTPClusterInScalingGroup_withKxVolume(t *testing.T) { }) } -func TestAccFinSpaceKxClusterInScalingGroup_withKxDataview(t *testing.T) { +func TestAccFinSpaceKxCluster_InScalingGroupWithKxDataview(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } From e68fd67a607911841e9838dcf92b2ff9a4802c62 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 10:09:59 -0500 Subject: [PATCH 189/438] r/aws_finspace_kx_volume(doc): fmt config again --- website/docs/r/finspace_kx_volume.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/r/finspace_kx_volume.html.markdown b/website/docs/r/finspace_kx_volume.html.markdown index 35e75bf3e32..0ddc66dc9e6 100644 --- a/website/docs/r/finspace_kx_volume.html.markdown +++ b/website/docs/r/finspace_kx_volume.html.markdown @@ -22,8 +22,8 @@ resource "aws_finspace_kx_volume" "example" { az_mode = "SINGLE" type = "NAS_1" nas1_configuration { 
- size= 1200 - type= "SSD_250" + size = 1200 + type = "SSD_250" } } ``` From a209c33f2582d1319efedb74420483db62006a96 Mon Sep 17 00:00:00 2001 From: David Hwang Date: Thu, 14 Dec 2023 10:10:21 -0500 Subject: [PATCH 190/438] Apply suggestions from code review Co-authored-by: Jared Baker --- internal/service/finspace/kx_cluster_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/service/finspace/kx_cluster_test.go b/internal/service/finspace/kx_cluster_test.go index 8ca4504aad7..14451a2d9f1 100644 --- a/internal/service/finspace/kx_cluster_test.go +++ b/internal/service/finspace/kx_cluster_test.go @@ -32,7 +32,7 @@ func testAccPreCheckManagedKxLicenseEnabled(t *testing.T) { } } -func TestAccSKIPFinSpaceKxCluster_basic(t *testing.T) { +func TestAccFinSpaceKxCluster_basic(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -69,7 +69,7 @@ func TestAccSKIPFinSpaceKxCluster_basic(t *testing.T) { }) } -func TestAccSKIPFinSpaceKxCluster_disappears(t *testing.T) { +func TestAccFinSpaceKxCluster_disappears(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -608,7 +608,7 @@ func TestAccFinSpaceKxCluster_ScalingGroup(t *testing.T) { }) } -func TestAccFinSpaceKxRDBClusterInScalingGroup_withKxVolume(t *testing.T) { +func TestAccFinSpaceKxCluster_InScalingGroupWithKxVolume(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -640,7 +640,7 @@ func TestAccFinSpaceKxRDBClusterInScalingGroup_withKxVolume(t *testing.T) { }) } -func TestAccFinSpaceKxTPClusterInScalingGroup_withKxVolume(t *testing.T) { +func TestAccFinSpaceKxCluster_InScalingGroupWithKxVolume(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } From 1ae3cedb7a2a8ced747b2c529cc12d3fda607d44 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 10:12:38 -0500 Subject: [PATCH 191/438] r/aws_finspace_kx_volume(test): 
fmt config again --- internal/service/finspace/kx_volume_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/finspace/kx_volume_test.go b/internal/service/finspace/kx_volume_test.go index 616261e5097..fa80b8b039a 100644 --- a/internal/service/finspace/kx_volume_test.go +++ b/internal/service/finspace/kx_volume_test.go @@ -258,8 +258,8 @@ resource "aws_finspace_kx_volume" "test" { az_mode = "SINGLE" type = "NAS_1" nas1_configuration { - type= "SSD_250" - size= 1200 + type = "SSD_250" + size = 1200 } } `, rName)) From 31cb4334856df230e0a317a3237e92cc9f9927e1 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 10:20:51 -0500 Subject: [PATCH 192/438] r/aws_finspace_kx_dataview: add headers --- internal/service/finspace/kx_dataview_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/internal/service/finspace/kx_dataview_test.go b/internal/service/finspace/kx_dataview_test.go index 0a26282997d..9bce6348592 100644 --- a/internal/service/finspace/kx_dataview_test.go +++ b/internal/service/finspace/kx_dataview_test.go @@ -1,3 +1,6 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + package finspace_test import ( From 0834c20027077a46eb6939e7269d48668b31d9dc Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 10:22:25 -0500 Subject: [PATCH 193/438] r/aws_finspace_kx_dataview: prefer create.AppendDiagError --- internal/service/finspace/kx_dataview.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/internal/service/finspace/kx_dataview.go b/internal/service/finspace/kx_dataview.go index b18aaa8f7e8..09bc349afb7 100644 --- a/internal/service/finspace/kx_dataview.go +++ b/internal/service/finspace/kx_dataview.go @@ -148,7 +148,7 @@ func resourceKxDataviewCreate(ctx context.Context, d *schema.ResourceData, meta rId, err := flex.FlattenResourceId(idParts, kxDataviewIdPartCount, false) if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionFlatteningResourceId, ResNameKxDataview, d.Get("name").(string), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionFlatteningResourceId, ResNameKxDataview, d.Get("name").(string), err) } d.SetId(rId) @@ -156,7 +156,7 @@ func resourceKxDataviewCreate(ctx context.Context, d *schema.ResourceData, meta DatabaseName: aws.String(d.Get("database_name").(string)), DataviewName: aws.String(d.Get("name").(string)), EnvironmentId: aws.String(d.Get("environment_id").(string)), - AutoUpdate: *aws.Bool(d.Get("auto_update").(bool)), + AutoUpdate: d.Get("auto_update").(bool), AzMode: types.KxAzMode(d.Get("az_mode").(string)), ClientToken: aws.String(id.UniqueId()), Tags: getTagsIn(ctx), @@ -180,13 +180,13 @@ func resourceKxDataviewCreate(ctx context.Context, d *schema.ResourceData, meta out, err := conn.CreateKxDataview(ctx, in) if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxDataview, d.Get("name").(string), err)...) 
+ return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxDataview, d.Get("name").(string), err) } if out == nil || out.DataviewName == nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionCreating, ResNameKxDataview, d.Get("name").(string), errors.New("empty output"))...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxDataview, d.Get("name").(string), errors.New("empty output")) } if _, err := waitKxDataviewCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxDataview, d.Get("name").(string), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxDataview, d.Get("name").(string), err) } return append(diags, resourceKxDataviewRead(ctx, d, meta)...) @@ -204,7 +204,7 @@ func resourceKxDataviewRead(ctx context.Context, d *schema.ResourceData, meta in } if err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionReading, ResNameKxDataview, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionReading, ResNameKxDataview, d.Id(), err) } d.Set("name", out.DataviewName) d.Set("description", out.Description) @@ -218,7 +218,7 @@ func resourceKxDataviewRead(ctx context.Context, d *schema.ResourceData, meta in d.Set("environment_id", out.EnvironmentId) d.Set("az_mode", out.AzMode) if err := d.Set("segment_configurations", flattenSegmentConfigurations(out.SegmentConfigurations)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionReading, ResNameKxDataview, d.Id(), err)...) 
+ return create.AppendDiagError(diags, names.FinSpace, create.ErrActionReading, ResNameKxDataview, d.Id(), err) } return diags @@ -243,11 +243,11 @@ func resourceKxDataviewUpdate(ctx context.Context, d *schema.ResourceData, meta } if _, err := conn.UpdateKxDataview(ctx, in); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionUpdating, ResNameKxDataview, d.Get("name").(string), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionUpdating, ResNameKxDataview, d.Get("name").(string), err) } if _, err := waitKxDataviewUpdated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForUpdate, ResNameKxDataview, d.Get("name").(string), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionWaitingForUpdate, ResNameKxDataview, d.Get("name").(string), err) } return append(diags, resourceKxDataviewRead(ctx, d, meta)...) @@ -269,11 +269,11 @@ func resourceKxDataviewDelete(ctx context.Context, d *schema.ResourceData, meta if errors.As(err, &nfe) { return diags } - return append(diags, create.DiagError(names.FinSpace, create.ErrActionDeleting, ResNameKxDataview, d.Get("name").(string), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionDeleting, ResNameKxDataview, d.Get("name").(string), err) } if _, err := waitKxDataviewDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil && !tfresource.NotFound(err) { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionWaitingForDeletion, ResNameKxDataview, d.Id(), err)...) 
+ return create.AppendDiagError(diags, names.FinSpace, create.ErrActionWaitingForDeletion, ResNameKxDataview, d.Id(), err) } return diags } From ef5a2d87841c6583daa9963eeb3c352df901978b Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 10:24:22 -0500 Subject: [PATCH 194/438] r/aws_finspace_kx_dataview: alphabetize attributes --- internal/service/finspace/kx_dataview.go | 74 ++++++++++++------------ 1 file changed, 38 insertions(+), 36 deletions(-) diff --git a/internal/service/finspace/kx_dataview.go b/internal/service/finspace/kx_dataview.go index 09bc349afb7..add5b00bc2e 100644 --- a/internal/service/finspace/kx_dataview.go +++ b/internal/service/finspace/kx_dataview.go @@ -6,12 +6,16 @@ package finspace import ( "context" "errors" + "log" + "time" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/finspace" "github.com/aws/aws-sdk-go-v2/service/finspace/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/flex" @@ -19,8 +23,6 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" - "log" - "time" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" @@ -50,36 +52,14 @@ func ResourceKxDataview() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringLenBetween(3, 63), - }, - "environment_id": { - Type: schema.TypeString, - Required: true, - ForceNew: 
true, - ValidateFunc: validation.StringLenBetween(3, 63), - }, - "database_name": { - Type: schema.TypeString, - Required: true, - ForceNew: true, - ValidateFunc: validation.StringLenBetween(3, 63), - }, - "description": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringLenBetween(1, 1000), - }, "auto_update": { Type: schema.TypeBool, ForceNew: true, Required: true, }, - "changeset_id": { + "availability_zone_id": { Type: schema.TypeString, + ForceNew: true, Optional: true, }, "az_mode": { @@ -88,11 +68,41 @@ func ResourceKxDataview() *schema.Resource { ForceNew: true, ValidateDiagFunc: enum.Validate[types.KxAzMode](), }, - "availability_zone_id": { + "changeset_id": { Type: schema.TypeString, - ForceNew: true, Optional: true, }, + "created_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "database_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "description": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringLenBetween(1, 1000), + }, + "environment_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, + "last_modified_timestamp": { + Type: schema.TypeString, + Computed: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + ValidateFunc: validation.StringLenBetween(3, 63), + }, "segment_configurations": { Type: schema.TypeList, Elem: &schema.Resource{ @@ -116,14 +126,6 @@ func ResourceKxDataview() *schema.Resource { Type: schema.TypeString, Computed: true, }, - "created_timestamp": { - Type: schema.TypeString, - Computed: true, - }, - "last_modified_timestamp": { - Type: schema.TypeString, - Computed: true, - }, names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), }, From bfdec1686b0c8b3ace004054682417e016e2bc4f Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 
2023 10:25:04 -0500 Subject: [PATCH 195/438] chore: changelog --- .changelog/34828.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/34828.txt diff --git a/.changelog/34828.txt b/.changelog/34828.txt new file mode 100644 index 00000000000..cfa7c1c5289 --- /dev/null +++ b/.changelog/34828.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +aws_finspace_kx_dataview +``` From 144aeb8cf5dadc22a6e27d50323734bd150826b8 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 14 Dec 2023 10:27:47 -0500 Subject: [PATCH 196/438] startReplication: replication NotFound is OK. --- internal/service/dms/replication_config.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/service/dms/replication_config.go b/internal/service/dms/replication_config.go index c5a8b15bf7d..b81793f6568 100644 --- a/internal/service/dms/replication_config.go +++ b/internal/service/dms/replication_config.go @@ -576,6 +576,10 @@ func startReplication(ctx context.Context, conn *dms.DatabaseMigrationService, a func stopReplication(ctx context.Context, conn *dms.DatabaseMigrationService, arn string, timeout time.Duration) error { replication, err := findReplicationByReplicationConfigARN(ctx, conn, arn) + if tfresource.NotFound(err) { + return nil + } + if err != nil { return fmt.Errorf("reading DMS Replication Config (%s) replication: %s", arn, err) } From cf484a231ec772f65d032f55413505782b6c8459 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 10:35:13 -0500 Subject: [PATCH 197/438] r/aws_finspace_kx_volume: nolintlint --- internal/service/finspace/kx_volume.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/finspace/kx_volume.go b/internal/service/finspace/kx_volume.go index 72bad740dcc..0446eb223ce 100644 --- a/internal/service/finspace/kx_volume.go +++ b/internal/service/finspace/kx_volume.go @@ -346,7 +346,7 @@ func waitKxVolumeCreated(ctx context.Context, conn *finspace.Client, id string, return nil, err } -func 
waitKxVolumeUpdated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxVolumeOutput, error) { //nolint:unparam +func waitKxVolumeUpdated(ctx context.Context, conn *finspace.Client, id string, timeout time.Duration) (*finspace.GetKxVolumeOutput, error) { stateConf := &retry.StateChangeConf{ Pending: enum.Slice(types.KxVolumeStatusCreating, types.KxVolumeStatusUpdating), Target: enum.Slice(types.KxVolumeStatusActive), From 6d8e014383f2136be2041c4c4f7a56e45e39d477 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 10:41:32 -0500 Subject: [PATCH 198/438] r/aws_finspace_kx_dataview(test): reorganize --- internal/service/finspace/kx_dataview_test.go | 152 +++++++++--------- 1 file changed, 73 insertions(+), 79 deletions(-) diff --git a/internal/service/finspace/kx_dataview_test.go b/internal/service/finspace/kx_dataview_test.go index 9bce6348592..1410cd96274 100644 --- a/internal/service/finspace/kx_dataview_test.go +++ b/internal/service/finspace/kx_dataview_test.go @@ -7,6 +7,8 @@ import ( "context" "errors" "fmt" + "testing" + "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/finspace" "github.com/aws/aws-sdk-go-v2/service/finspace/types" @@ -14,10 +16,10 @@ import ( "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" + "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" tffinspace "github.com/hashicorp/terraform-provider-aws/internal/service/finspace" "github.com/hashicorp/terraform-provider-aws/names" - "testing" ) func TestAccFinSpaceKxDataview_basic(t *testing.T) { @@ -26,7 +28,7 @@ func TestAccFinSpaceKxDataview_basic(t *testing.T) { } ctx := acctest.Context(t) - var kxdataview finspace.GetKxDataviewOutput + var dataview finspace.GetKxDataviewOutput rName := 
sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_finspace_kx_dataview.test" @@ -42,7 +44,7 @@ func TestAccFinSpaceKxDataview_basic(t *testing.T) { { Config: testAccKxDataviewConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckKxDataviewExists(ctx, resourceName, &kxdataview), + testAccCheckKxDataviewExists(ctx, resourceName, &dataview), resource.TestCheckResourceAttr(resourceName, "name", rName), resource.TestCheckResourceAttr(resourceName, "status", string(types.KxDataviewStatusActive)), ), @@ -61,7 +63,7 @@ func TestAccFinSpaceKxDataview_disappears(t *testing.T) { } ctx := acctest.Context(t) - var kxdataview finspace.GetKxDataviewOutput + var dataview finspace.GetKxDataviewOutput rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_finspace_kx_dataview.test" @@ -77,7 +79,7 @@ func TestAccFinSpaceKxDataview_disappears(t *testing.T) { { Config: testAccKxDataviewConfig_basic(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckKxDataviewExists(ctx, resourceName, &kxdataview), + testAccCheckKxDataviewExists(ctx, resourceName, &dataview), acctest.CheckResourceDisappears(ctx, acctest.Provider, tffinspace.ResourceKxDataview(), resourceName), ), ExpectNonEmptyPlan: true, @@ -86,38 +88,38 @@ func TestAccFinSpaceKxDataview_disappears(t *testing.T) { }) } -func testAccKxDataviewConfigBase(rName string) string { - return fmt.Sprintf(` -resource "aws_kms_key" "test" { - deletion_window_in_days = 7 -} +func TestAccFinSpaceKxDataview_withKxVolume(t *testing.T) { + if testing.Short() { + t.Skip("skipping long-running test in short mode") + } -resource "aws_finspace_kx_environment" "test" { - name = %[1]q - kms_key_id = aws_kms_key.test.arn -} -resource "aws_finspace_kx_database" "test" { - name = %[1]q - environment_id = aws_finspace_kx_environment.test.id -} -`, rName) -} -func testAccKxDataviewConfig_basic(rName string) string { - return acctest.ConfigCompose( - testAccKxDataviewConfigBase(rName), 
- fmt.Sprintf(` -resource "aws_finspace_kx_dataview" "test" { - name = %[1]q - environment_id = aws_finspace_kx_environment.test.id - database_name = aws_finspace_kx_database.test.name - auto_update = true - az_mode = "SINGLE" - availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] -} -`, rName)) + ctx := acctest.Context(t) + var dataview finspace.GetKxDataviewOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_dataview.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxDataviewDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxDataviewConfig_withKxVolume(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxDataviewExists(ctx, resourceName, &dataview), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "status", string(types.KxDataviewStatusActive)), + ), + }, + }, + }) } -func testAccCheckKxDataviewExists(ctx context.Context, name string, kxdataview *finspace.GetKxDataviewOutput) resource.TestCheckFunc { +func testAccCheckKxDataviewExists(ctx context.Context, name string, dataview *finspace.GetKxDataviewOutput) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[name] if !ok { @@ -138,7 +140,7 @@ func testAccCheckKxDataviewExists(ctx context.Context, name string, kxdataview * return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxDataview, rs.Primary.ID, err) } - *kxdataview = *resp + *dataview = *resp return nil } @@ -170,8 +172,43 @@ func testAccCheckKxDataviewDestroy(ctx context.Context) resource.TestCheckFunc { } } -func testAccKxDataviewVolumeBase(rName 
string) string { +func testAccKxDataviewConfigBase(rName string) string { return fmt.Sprintf(` +resource "aws_kms_key" "test" { + deletion_window_in_days = 7 +} + +resource "aws_finspace_kx_environment" "test" { + name = %[1]q + kms_key_id = aws_kms_key.test.arn +} + +resource "aws_finspace_kx_database" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id +} +`, rName) +} + +func testAccKxDataviewConfig_basic(rName string) string { + return acctest.ConfigCompose( + testAccKxDataviewConfigBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_dataview" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + database_name = aws_finspace_kx_database.test.name + auto_update = true + az_mode = "SINGLE" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] +} +`, rName)) +} + +func testAccKxDataviewConfig_withKxVolume(rName string) string { + return acctest.ConfigCompose( + testAccKxDataviewConfigBase(rName), + fmt.Sprintf(` resource "aws_finspace_kx_volume" "test" { name = %[1]q environment_id = aws_finspace_kx_environment.test.id @@ -183,14 +220,7 @@ resource "aws_finspace_kx_volume" "test" { size= 1200 } } -`, rName) -} -func testAccKxDataviewConfig_withKxVolume(rName string) string { - return acctest.ConfigCompose( - testAccKxDataviewConfigBase(rName), - testAccKxDataviewVolumeBase(rName), - fmt.Sprintf(` resource "aws_finspace_kx_dataview" "test" { name = %[1]q environment_id = aws_finspace_kx_environment.test.id @@ -206,39 +236,3 @@ resource "aws_finspace_kx_dataview" "test" { } `, rName)) } - -func TestAccFinSpaceKxDataview_withKxVolume(t *testing.T) { - if testing.Short() { - t.Skip("skipping long-running test in short mode") - } - ctx := acctest.Context(t) - - var kxdataview finspace.GetKxDataviewOutput - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - resourceName := "aws_finspace_kx_dataview.test" - - resource.ParallelTest(t, resource.TestCase{ - - PreCheck: func() { - 
acctest.PreCheck(ctx, t) - acctest.PreCheckPartitionHasService(t, finspace.ServiceID) - }, - ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), - - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - - CheckDestroy: testAccCheckKxDataviewDestroy(ctx), - - Steps: []resource.TestStep{ - { - Config: testAccKxDataviewConfig_withKxVolume(rName), - - Check: resource.ComposeTestCheckFunc( - testAccCheckKxDataviewExists(ctx, resourceName, &kxdataview), - resource.TestCheckResourceAttr(resourceName, "name", rName), - resource.TestCheckResourceAttr(resourceName, "status", string(types.KxDataviewStatusActive)), - ), - }, - }, - }) -} From db3b97354cb929c19ef49120650ae10cf53dde0c Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 10:44:25 -0500 Subject: [PATCH 199/438] r/aws_finspace_kx_dataview(test): prefer finder in test check func --- internal/service/finspace/kx_dataview.go | 6 +++--- internal/service/finspace/kx_dataview_test.go | 20 ++++++++----------- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/internal/service/finspace/kx_dataview.go b/internal/service/finspace/kx_dataview.go index add5b00bc2e..a269c6b1172 100644 --- a/internal/service/finspace/kx_dataview.go +++ b/internal/service/finspace/kx_dataview.go @@ -198,7 +198,7 @@ func resourceKxDataviewRead(ctx context.Context, d *schema.ResourceData, meta in var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) - out, err := findKxDataviewById(ctx, conn, d.Id()) + out, err := FindKxDataviewById(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] FinSpace KxDataview (%s) not found, removing from state", d.Id()) d.SetId("") @@ -280,7 +280,7 @@ func resourceKxDataviewDelete(ctx context.Context, d *schema.ResourceData, meta return diags } -func findKxDataviewById(ctx context.Context, conn *finspace.Client, id string) (*finspace.GetKxDataviewOutput, error) { +func FindKxDataviewById(ctx context.Context, conn 
*finspace.Client, id string) (*finspace.GetKxDataviewOutput, error) { idParts, err := flex.ExpandResourceId(id, kxDataviewIdPartCount, false) if err != nil { return nil, err @@ -364,7 +364,7 @@ func waitKxDataviewDeleted(ctx context.Context, conn *finspace.Client, id string func statusKxDataview(ctx context.Context, conn *finspace.Client, id string) retry.StateRefreshFunc { return func() (interface{}, string, error) { - out, err := findKxDataviewById(ctx, conn, id) + out, err := FindKxDataviewById(ctx, conn, id) if tfresource.NotFound(err) { return nil, "", nil } diff --git a/internal/service/finspace/kx_dataview_test.go b/internal/service/finspace/kx_dataview_test.go index 1410cd96274..9f1d7fa9862 100644 --- a/internal/service/finspace/kx_dataview_test.go +++ b/internal/service/finspace/kx_dataview_test.go @@ -9,7 +9,6 @@ import ( "fmt" "testing" - "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/finspace" "github.com/aws/aws-sdk-go-v2/service/finspace/types" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" @@ -57,6 +56,7 @@ func TestAccFinSpaceKxDataview_basic(t *testing.T) { }, }) } + func TestAccFinSpaceKxDataview_disappears(t *testing.T) { if testing.Short() { t.Skip("Skipping test in short mode.") @@ -131,11 +131,8 @@ func testAccCheckKxDataviewExists(ctx context.Context, name string, dataview *fi } conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) - resp, err := conn.GetKxDataview(ctx, &finspace.GetKxDataviewInput{ - DatabaseName: aws.String(rs.Primary.Attributes["database_name"]), - EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), - DataviewName: aws.String(rs.Primary.Attributes["name"]), - }) + + resp, err := tffinspace.FindKxDataviewById(ctx, conn, rs.Primary.ID) if err != nil { return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxDataview, rs.Primary.ID, err) } @@ -154,11 +151,8 @@ func testAccCheckKxDataviewDestroy(ctx 
context.Context) resource.TestCheckFunc { } conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) - _, err := conn.GetKxDataview(ctx, &finspace.GetKxDataviewInput{ - DatabaseName: aws.String(rs.Primary.Attributes["database_name"]), - EnvironmentId: aws.String(rs.Primary.Attributes["environment_id"]), - DataviewName: aws.String(rs.Primary.Attributes["name"]), - }) + + _, err := tffinspace.FindKxDataviewById(ctx, conn, rs.Primary.ID) if err != nil { var nfe *types.ResourceNotFoundException if errors.As(err, &nfe) { @@ -166,8 +160,10 @@ func testAccCheckKxDataviewDestroy(ctx context.Context) resource.TestCheckFunc { } return err } - return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxDataview, rs.Primary.ID, err) + + return create.Error(names.FinSpace, create.ErrActionCheckingExistence, tffinspace.ResNameKxDataview, rs.Primary.ID, errors.New("not destroyed")) } + return nil } } From b0ca6929c144d4464e6b675d68509a898a3c036b Mon Sep 17 00:00:00 2001 From: Mayank Hirani Date: Thu, 14 Dec 2023 11:28:02 -0500 Subject: [PATCH 200/438] Fix acceptance test linting. 
--- internal/service/finspace/kx_dataview_test.go | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/internal/service/finspace/kx_dataview_test.go b/internal/service/finspace/kx_dataview_test.go index 9f1d7fa9862..cfe02b985c5 100644 --- a/internal/service/finspace/kx_dataview_test.go +++ b/internal/service/finspace/kx_dataview_test.go @@ -175,8 +175,8 @@ resource "aws_kms_key" "test" { } resource "aws_finspace_kx_environment" "test" { - name = %[1]q - kms_key_id = aws_kms_key.test.arn + name = %[1]q + kms_key_id = aws_kms_key.test.arn } resource "aws_finspace_kx_database" "test" { @@ -206,14 +206,14 @@ func testAccKxDataviewConfig_withKxVolume(rName string) string { testAccKxDataviewConfigBase(rName), fmt.Sprintf(` resource "aws_finspace_kx_volume" "test" { - name = %[1]q - environment_id = aws_finspace_kx_environment.test.id + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] - az_mode = "SINGLE" - type = "NAS_1" + az_mode = "SINGLE" + type = "NAS_1" nas1_configuration { - type= "SSD_250" - size= 1200 + type= "SSD_250" + size= 1200 } } @@ -226,8 +226,8 @@ resource "aws_finspace_kx_dataview" "test" { availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] segment_configurations { - db_paths = ["/*"] - volume_name = aws_finspace_kx_volume.test.name + db_paths = ["/*"] + volume_name = aws_finspace_kx_volume.test.name } } `, rName)) From 3836f546c5fc78bfe85c611a2baa347cd12ccf9a Mon Sep 17 00:00:00 2001 From: David Hwang Date: Thu, 14 Dec 2023 11:55:44 -0500 Subject: [PATCH 201/438] Fix linting, rename tests --- internal/service/finspace/kx_cluster_test.go | 74 ++++++++++---------- 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/internal/service/finspace/kx_cluster_test.go b/internal/service/finspace/kx_cluster_test.go index 14451a2d9f1..9d69793554a 100644 --- 
a/internal/service/finspace/kx_cluster_test.go +++ b/internal/service/finspace/kx_cluster_test.go @@ -608,7 +608,7 @@ func TestAccFinSpaceKxCluster_ScalingGroup(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_InScalingGroupWithKxVolume(t *testing.T) { +func TestAccFinSpaceKxCluster_RDBInScalingGroupWithKxVolume(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -640,7 +640,7 @@ func TestAccFinSpaceKxCluster_InScalingGroupWithKxVolume(t *testing.T) { }) } -func TestAccFinSpaceKxCluster_InScalingGroupWithKxVolume(t *testing.T) { +func TestAccFinSpaceKxCluster_TPInScalingGroupWithKxVolume(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") } @@ -876,36 +876,36 @@ resource "aws_route" "r" { func testAccKxClusterConfigScalingGroupBase(rName string) string { return fmt.Sprintf(` - resource "aws_finspace_kx_scaling_group" "test" { - name = %[1]q - environment_id = aws_finspace_kx_environment.test.id - availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] - host_type = "kx.sg.4xlarge" - } +resource "aws_finspace_kx_scaling_group" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + host_type = "kx.sg.4xlarge" +} `, rName) } func testAccKxClusterConfigKxVolumeBase(rName string) string { return fmt.Sprintf(` - resource "aws_finspace_kx_volume" "test" { - name = %[1]q - environment_id = aws_finspace_kx_environment.test.id - availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] - az_mode = "SINGLE" - type = "NAS_1" - nas1_configuration { - type= "SSD_1000" - size= 1200 - } - } +resource "aws_finspace_kx_volume" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type = 
"SSD_1000" + size = 1200 + } +} `, rName) } func testAccKxClusterConfigKxDataviewBase(rName string) string { return fmt.Sprintf(` resource "aws_finspace_kx_database" "test" { - name = %[1]q - environment_id = aws_finspace_kx_environment.test.id + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id } resource "aws_finspace_kx_dataview" "test" { @@ -964,10 +964,10 @@ resource "aws_finspace_kx_cluster" "test" { } scaling_group_configuration { scaling_group_name = aws_finspace_kx_scaling_group.test.name - memory_limit = 200 + memory_limit = 200 memory_reservation = 100 - node_count = 1 - cpu = 0.5 + node_count = 1 + cpu = 0.5 } } `, rName)) @@ -980,9 +980,9 @@ func testAccKxRDBClusterConfigInScalingGroup_withKxVolume(rName string) string { testAccKxClusterConfigScalingGroupBase(rName), fmt.Sprintf(` resource "aws_finspace_kx_database" "test" { - name = %[1]q - environment_id = aws_finspace_kx_environment.test.id - } + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id +} resource "aws_finspace_kx_cluster" "test" { name = %[1]q @@ -999,10 +999,10 @@ resource "aws_finspace_kx_cluster" "test" { } scaling_group_configuration { scaling_group_name = aws_finspace_kx_scaling_group.test.name - memory_limit = 200 + memory_limit = 200 memory_reservation = 100 - node_count = 1 - cpu = 0.5 + node_count = 1 + cpu = 0.5 } database { database_name = aws_finspace_kx_database.test.name @@ -1035,10 +1035,10 @@ resource "aws_finspace_kx_cluster" "test" { } scaling_group_configuration { scaling_group_name = aws_finspace_kx_scaling_group.test.name - memory_limit = 200 + memory_limit = 200 memory_reservation = 100 - node_count = 1 - cpu = 0.5 + node_count = 1 + cpu = 0.5 } tickerplant_log_configuration { tickerplant_log_volumes = [aws_finspace_kx_volume.test.name] @@ -1069,15 +1069,15 @@ resource "aws_finspace_kx_cluster" "test" { scaling_group_configuration { scaling_group_name = aws_finspace_kx_scaling_group.test.name - memory_limit = 200 + memory_limit = 
200 memory_reservation = 100 - node_count = 1 - cpu = 0.5 + node_count = 1 + cpu = 0.5 } database { database_name = aws_finspace_kx_database.test.name - dataview_name = aws_finspace_kx_dataview.test.name + dataview_name = aws_finspace_kx_dataview.test.name } lifecycle { From 583c90e6da85236ea8e9280600e9b5dd190c2680 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 12:39:58 -0500 Subject: [PATCH 202/438] r/aws_finspace_kx_dataview(test): fmt config --- internal/service/finspace/kx_dataview_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/service/finspace/kx_dataview_test.go b/internal/service/finspace/kx_dataview_test.go index cfe02b985c5..88096f6bc92 100644 --- a/internal/service/finspace/kx_dataview_test.go +++ b/internal/service/finspace/kx_dataview_test.go @@ -212,8 +212,8 @@ resource "aws_finspace_kx_volume" "test" { az_mode = "SINGLE" type = "NAS_1" nas1_configuration { - type= "SSD_250" - size= 1200 + size = 1200 + type = "SSD_250" } } @@ -227,7 +227,7 @@ resource "aws_finspace_kx_dataview" "test" { segment_configurations { db_paths = ["/*"] - volume_name = aws_finspace_kx_volume.test.name + volume_name = aws_finspace_kx_volume.test.name } } `, rName)) From 6c9ce2ae31858e5a514e62f438818b6d3953c62e Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 12:42:02 -0500 Subject: [PATCH 203/438] r/aws_finspace_kx_dataview(doc): fmt config, tidy descriptions --- website/docs/r/finspace_kx_dataview.html.markdown | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/website/docs/r/finspace_kx_dataview.html.markdown b/website/docs/r/finspace_kx_dataview.html.markdown index 70a2b762c90..7816b6651d0 100644 --- a/website/docs/r/finspace_kx_dataview.html.markdown +++ b/website/docs/r/finspace_kx_dataview.html.markdown @@ -23,7 +23,7 @@ resource "aws_finspace_kx_dataview" "example" { description = "Terraform managed Kx Dataview" az_mode = "SINGLE" auto_update = true - + 
segment_configurations { volume_name = aws_finspace_kx_volume.example.name db_paths = ["/*"] @@ -36,25 +36,26 @@ resource "aws_finspace_kx_dataview" "example" { The following arguments are required: * `az_mode` - (Required) The number of availability zones you want to assign per cluster. This can be one of the following: - * SINGLE - Assigns one availability zone per cluster. - * MULTI - Assigns all the availability zones per cluster. + * `SINGLE` - Assigns one availability zone per cluster. + * `MULTI` - Assigns all the availability zones per cluster. * `database_name` - (Required) The name of the database where you want to create a dataview. * `environment_id` - (Required) Unique identifier for the KX environment. * `name` - (Required) A unique identifier for the dataview. The following arguments are optional: + * `auto_update` - (Optional) The option to specify whether you want to apply all the future additions and corrections automatically to the dataview, when you ingest new changesets. The default value is false. * `availability_zone_id` - (Optional) The identifier of the availability zones. If attaching a volume, the volume must be in the same availability zone as the dataview that you are attaching to. * `changeset_id` - (Optional) A unique identifier of the changeset of the database that you want to use to ingest data. * `description` - (Optional) A description for the dataview. -* `segment_configurations` - (Optional) The configuration that contains the database path of the data that you want to place on each selected volume. Each segment must have a unique database path for each volume. If you do not explicitly specify any database path for a volume, they are accessible from the cluster through the default S3/object store segment. See [segment_configurations](#segment_configurations). +* `segment_configurations` - (Optional) The configuration that contains the database path of the data that you want to place on each selected volume. 
Each segment must have a unique database path for each volume. If you do not explicitly specify any database path for a volume, they are accessible from the cluster through the default S3/object store segment. See [segment_configurations](#segment_configurations-argument-reference) below. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. -### segment_configurations +### `segment_configurations` Argument Reference + * `db_paths` - (Required) The database path of the data that you want to place on each selected volume. Each segment must have a unique database path for each volume. * `volume_name` - (Required) The name of the volume that you want to attach to a dataview. This volume must be in the same availability zone as the dataview that you are attaching to. - ## Attribute Reference This resource exports the following attributes in addition to the arguments above: From 568d51a317508d1aefa5b9a9bf89eef63cc4adb7 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 12:49:52 -0500 Subject: [PATCH 204/438] r/aws_finspace_kx_dataview(doc): terrafmt, markdownlint fixes --- website/docs/r/finspace_kx_dataview.html.markdown | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/website/docs/r/finspace_kx_dataview.html.markdown b/website/docs/r/finspace_kx_dataview.html.markdown index 7816b6651d0..ad2d6b51049 100644 --- a/website/docs/r/finspace_kx_dataview.html.markdown +++ b/website/docs/r/finspace_kx_dataview.html.markdown @@ -24,7 +24,7 @@ resource "aws_finspace_kx_dataview" "example" { az_mode = "SINGLE" auto_update = true - segment_configurations { + segment_configurations { volume_name = aws_finspace_kx_volume.example.name db_paths = ["/*"] } @@ -36,9 +36,9 @@ resource "aws_finspace_kx_dataview" "example" { The 
following arguments are required: * `az_mode` - (Required) The number of availability zones you want to assign per cluster. This can be one of the following: - * `SINGLE` - Assigns one availability zone per cluster. - * `MULTI` - Assigns all the availability zones per cluster. -* `database_name` - (Required) The name of the database where you want to create a dataview. + * `SINGLE` - Assigns one availability zone per cluster. + * `MULTI` - Assigns all the availability zones per cluster. +* `database_name` - (Required) The name of the database where you want to create a dataview. * `environment_id` - (Required) Unique identifier for the KX environment. * `name` - (Required) A unique identifier for the dataview. @@ -63,7 +63,7 @@ This resource exports the following attributes in addition to the arguments abov * `arn` - Amazon Resource Name (ARN) identifier of the KX dataview. * `created_timestamp` - Timestamp at which the dataview was created in FinSpace. Value determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. * `id` - A comma-delimited string joining environment ID, database name and dataview name. -* `last_modified_timestamp` - The last time that the dataview was updated in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. +* `last_modified_timestamp` - The last time that the dataview was updated in FinSpace. The value is determined as epoch time in milliseconds. For example, the value for Monday, November 1, 2021 12:00:00 PM UTC is specified as 1635768000000. * `tags_all` - Map of tags assigned to the resource, including those inherited from the provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block). 
## Timeouts From f63ea254fda343a4612f96487bb0075682e0b470 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 12:51:50 -0500 Subject: [PATCH 205/438] r/aws_finspace_kx_dataview(test): terrafmt --- internal/service/finspace/kx_dataview_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/finspace/kx_dataview_test.go b/internal/service/finspace/kx_dataview_test.go index 88096f6bc92..0dda532a630 100644 --- a/internal/service/finspace/kx_dataview_test.go +++ b/internal/service/finspace/kx_dataview_test.go @@ -226,7 +226,7 @@ resource "aws_finspace_kx_dataview" "test" { availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] segment_configurations { - db_paths = ["/*"] + db_paths = ["/*"] volume_name = aws_finspace_kx_volume.test.name } } From 0e0b6dded79c4ae46fd8710e2dcd902508c11ba8 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 12:55:18 -0500 Subject: [PATCH 206/438] r/aws_finspace_kx_dataview(doc): missed markdownlint --- website/docs/r/finspace_kx_dataview.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/r/finspace_kx_dataview.html.markdown b/website/docs/r/finspace_kx_dataview.html.markdown index ad2d6b51049..f7f77da7742 100644 --- a/website/docs/r/finspace_kx_dataview.html.markdown +++ b/website/docs/r/finspace_kx_dataview.html.markdown @@ -36,8 +36,8 @@ resource "aws_finspace_kx_dataview" "example" { The following arguments are required: * `az_mode` - (Required) The number of availability zones you want to assign per cluster. This can be one of the following: - * `SINGLE` - Assigns one availability zone per cluster. - * `MULTI` - Assigns all the availability zones per cluster. + * `SINGLE` - Assigns one availability zone per cluster. + * `MULTI` - Assigns all the availability zones per cluster. * `database_name` - (Required) The name of the database where you want to create a dataview. 
* `environment_id` - (Required) Unique identifier for the KX environment. * `name` - (Required) A unique identifier for the dataview. From 99d7818b8b18db0733a8e69fb8ee2adcaf3c7f67 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 14 Dec 2023 12:56:43 -0500 Subject: [PATCH 207/438] dms: Add 'TaskSettings'. --- internal/service/dms/replication_config.go | 22 +-- internal/service/dms/task_settings_json.go | 158 +++++++++++++++++++++ 2 files changed, 164 insertions(+), 16 deletions(-) create mode 100644 internal/service/dms/task_settings_json.go diff --git a/internal/service/dms/replication_config.go b/internal/service/dms/replication_config.go index b81793f6568..73cc81bf697 100644 --- a/internal/service/dms/replication_config.go +++ b/internal/service/dms/replication_config.go @@ -15,7 +15,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" @@ -113,21 +112,12 @@ func ResourceReplicationConfig() *schema.Resource { ForceNew: true, }, "replication_settings": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringIsJSON, - DiffSuppressFunc: verify.SuppressEquivalentJSONRemovingReadOnlyFieldsDiffs( - `"historyTimeslotInMinutes"`, - `"EnableLogContext"`, - `"CloudWatchLogGroup"`, - `"CloudWatchLogStream"`, - ), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringIsJSON, + DiffSuppressFunc: verify.SuppressEquivalentJSONDiffs, DiffSuppressOnRefresh: true, - StateFunc: func(v interface{}) string { - json, _ := structure.NormalizeJsonString(v) - return json - }, }, "replication_type": { Type: 
schema.TypeString, @@ -243,7 +233,7 @@ func resourceReplicationConfigRead(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "setting compute_config: %s", err) } d.Set("replication_config_identifier", replicationConfig.ReplicationConfigIdentifier) - d.Set("replication_settings", replicationConfig.ReplicationSettings) + d.Set("replication_settings", flattenSettings(aws.StringValue(replicationConfig.ReplicationSettings))) d.Set("replication_type", replicationConfig.ReplicationType) d.Set("source_endpoint_arn", replicationConfig.SourceEndpointArn) d.Set("supplemental_settings", replicationConfig.SupplementalSettings) diff --git a/internal/service/dms/task_settings_json.go b/internal/service/dms/task_settings_json.go new file mode 100644 index 00000000000..e67f14943cb --- /dev/null +++ b/internal/service/dms/task_settings_json.go @@ -0,0 +1,158 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package dms + +import ( + "encoding/json" +) + +// https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TaskSettings.html#CHAP_Tasks.CustomizingTasks.TaskSettings.Example +// https://mholt.github.io/json-to-go/ + +type TaskSettings struct { + TargetMetadata struct { + TargetSchema string `json:"TargetSchema,omitempty"` + SupportLobs bool `json:"SupportLobs,omitempty"` + FullLobMode bool `json:"FullLobMode,omitempty"` + LobChunkSize int `json:"LobChunkSize,omitempty"` + LimitedSizeLobMode bool `json:"LimitedSizeLobMode,omitempty"` + LobMaxSize int `json:"LobMaxSize,omitempty"` + InlineLobMaxSize int `json:"InlineLobMaxSize,omitempty"` + LoadMaxFileSize int `json:"LoadMaxFileSize,omitempty"` + ParallelLoadThreads int `json:"ParallelLoadThreads,omitempty"` + ParallelLoadBufferSize int `json:"ParallelLoadBufferSize,omitempty"` + ParallelLoadQueuesPerThread int `json:"ParallelLoadQueuesPerThread,omitempty"` + ParallelApplyThreads int `json:"ParallelApplyThreads,omitempty"` + ParallelApplyBufferSize int 
`json:"ParallelApplyBufferSize,omitempty"` + ParallelApplyQueuesPerThread int `json:"ParallelApplyQueuesPerThread,omitempty"` + BatchApplyEnabled bool `json:"BatchApplyEnabled,omitempty"` + TaskRecoveryTableEnabled bool `json:"TaskRecoveryTableEnabled,omitempty"` + } `json:"TargetMetadata,omitempty"` + FullLoadSettings struct { + TargetTablePrepMode string `json:"TargetTablePrepMode,omitempty"` + CreatePkAfterFullLoad bool `json:"CreatePkAfterFullLoad,omitempty"` + StopTaskCachedChangesApplied bool `json:"StopTaskCachedChangesApplied,omitempty"` + StopTaskCachedChangesNotApplied bool `json:"StopTaskCachedChangesNotApplied,omitempty"` + MaxFullLoadSubTasks int `json:"MaxFullLoadSubTasks,omitempty"` + TransactionConsistencyTimeout int `json:"TransactionConsistencyTimeout,omitempty"` + CommitRate int `json:"CommitRate,omitempty"` + } `json:"FullLoadSettings,omitempty"` + TTSettings struct { + EnableTT bool `json:"EnableTT,omitempty"` + TTS3Settings struct { + EncryptionMode string `json:"EncryptionMode,omitempty"` + ServerSideEncryptionKmsKeyID string `json:"ServerSideEncryptionKmsKeyId,omitempty"` + ServiceAccessRoleArn string `json:"ServiceAccessRoleArn,omitempty"` + BucketName string `json:"BucketName,omitempty"` + BucketFolder string `json:"BucketFolder,omitempty"` + EnableDeletingFromS3OnTaskDelete bool `json:"EnableDeletingFromS3OnTaskDelete,omitempty"` + } `json:"TTS3Settings,omitempty"` + TTRecordSettings struct { + EnableRawData bool `json:"EnableRawData,omitempty"` + OperationsToLog string `json:"OperationsToLog,omitempty"` + MaxRecordSize int `json:"MaxRecordSize,omitempty"` + } `json:"TTRecordSettings,omitempty"` + } `json:"TTSettings,omitempty"` + Logging struct { + EnableLogging bool `json:"EnableLogging,omitempty"` + } `json:"Logging,omitempty"` + ControlTablesSettings struct { + ControlSchema string `json:"ControlSchema,omitempty"` + HistoryTimeslotInMinutes int `json:"HistoryTimeslotInMinutes,omitempty"` + HistoryTableEnabled bool 
`json:"HistoryTableEnabled,omitempty"` + SuspendedTablesTableEnabled bool `json:"SuspendedTablesTableEnabled,omitempty"` + StatusTableEnabled bool `json:"StatusTableEnabled,omitempty"` + } `json:"ControlTablesSettings,omitempty"` + StreamBufferSettings struct { + StreamBufferCount int `json:"StreamBufferCount,omitempty"` + StreamBufferSizeInMB int `json:"StreamBufferSizeInMB,omitempty"` + } `json:"StreamBufferSettings,omitempty"` + ChangeProcessingTuning struct { + BatchApplyPreserveTransaction bool `json:"BatchApplyPreserveTransaction,omitempty"` + BatchApplyTimeoutMin int `json:"BatchApplyTimeoutMin,omitempty"` + BatchApplyTimeoutMax int `json:"BatchApplyTimeoutMax,omitempty"` + BatchApplyMemoryLimit int `json:"BatchApplyMemoryLimit,omitempty"` + BatchSplitSize int `json:"BatchSplitSize,omitempty"` + MinTransactionSize int `json:"MinTransactionSize,omitempty"` + CommitTimeout int `json:"CommitTimeout,omitempty"` + MemoryLimitTotal int `json:"MemoryLimitTotal,omitempty"` + MemoryKeepTime int `json:"MemoryKeepTime,omitempty"` + StatementCacheSize int `json:"StatementCacheSize,omitempty"` + } `json:"ChangeProcessingTuning,omitempty"` + ChangeProcessingDdlHandlingPolicy struct { + HandleSourceTableDropped bool `json:"HandleSourceTableDropped,omitempty"` + HandleSourceTableTruncated bool `json:"HandleSourceTableTruncated,omitempty"` + HandleSourceTableAltered bool `json:"HandleSourceTableAltered,omitempty"` + } `json:"ChangeProcessingDdlHandlingPolicy,omitempty"` + LoopbackPreventionSettings struct { + EnableLoopbackPrevention bool `json:"EnableLoopbackPrevention,omitempty"` + SourceSchema string `json:"SourceSchema,omitempty"` + TargetSchema string `json:"TargetSchema,omitempty"` + } `json:"LoopbackPreventionSettings,omitempty"` + CharacterSetSettings struct { + CharacterReplacements []struct { + SourceCharacterCodePoint int `json:"SourceCharacterCodePoint,omitempty"` + TargetCharacterCodePoint int `json:"TargetCharacterCodePoint,omitempty"` + } 
`json:"CharacterReplacements,omitempty"` + CharacterSetSupport struct { + CharacterSet string `json:"CharacterSet,omitempty"` + ReplaceWithCharacterCodePoint int `json:"ReplaceWithCharacterCodePoint,omitempty"` + } `json:"CharacterSetSupport,omitempty"` + } `json:"CharacterSetSettings,omitempty"` + BeforeImageSettings struct { + EnableBeforeImage bool `json:"EnableBeforeImage,omitempty"` + FieldName string `json:"FieldName,omitempty"` + ColumnFilter string `json:"ColumnFilter,omitempty"` + } `json:"BeforeImageSettings,omitempty"` + ErrorBehavior struct { + DataErrorPolicy string `json:"DataErrorPolicy,omitempty"` + DataTruncationErrorPolicy string `json:"DataTruncationErrorPolicy,omitempty"` + DataErrorEscalationPolicy string `json:"DataErrorEscalationPolicy,omitempty"` + DataErrorEscalationCount int `json:"DataErrorEscalationCount,omitempty"` + TableErrorPolicy string `json:"TableErrorPolicy,omitempty"` + TableErrorEscalationPolicy string `json:"TableErrorEscalationPolicy,omitempty"` + TableErrorEscalationCount int `json:"TableErrorEscalationCount,omitempty"` + RecoverableErrorCount int `json:"RecoverableErrorCount,omitempty"` + RecoverableErrorInterval int `json:"RecoverableErrorInterval,omitempty"` + RecoverableErrorThrottling bool `json:"RecoverableErrorThrottling,omitempty"` + RecoverableErrorThrottlingMax int `json:"RecoverableErrorThrottlingMax,omitempty"` + ApplyErrorDeletePolicy string `json:"ApplyErrorDeletePolicy,omitempty"` + ApplyErrorInsertPolicy string `json:"ApplyErrorInsertPolicy,omitempty"` + ApplyErrorUpdatePolicy string `json:"ApplyErrorUpdatePolicy,omitempty"` + ApplyErrorEscalationPolicy string `json:"ApplyErrorEscalationPolicy,omitempty"` + ApplyErrorEscalationCount int `json:"ApplyErrorEscalationCount,omitempty"` + FullLoadIgnoreConflicts bool `json:"FullLoadIgnoreConflicts,omitempty"` + } `json:"ErrorBehavior,omitempty"` + ValidationSettings struct { + EnableValidation bool `json:"EnableValidation,omitempty"` + ValidationMode string 
`json:"ValidationMode,omitempty"` + ThreadCount int `json:"ThreadCount,omitempty"` + PartitionSize int `json:"PartitionSize,omitempty"` + FailureMaxCount int `json:"FailureMaxCount,omitempty"` + RecordFailureDelayInMinutes int `json:"RecordFailureDelayInMinutes,omitempty"` + RecordSuspendDelayInMinutes int `json:"RecordSuspendDelayInMinutes,omitempty"` + MaxKeyColumnSize int `json:"MaxKeyColumnSize,omitempty"` + TableFailureMaxCount int `json:"TableFailureMaxCount,omitempty"` + ValidationOnly bool `json:"ValidationOnly,omitempty"` + HandleCollationDiff bool `json:"HandleCollationDiff,omitempty"` + RecordFailureDelayLimitInMinutes int `json:"RecordFailureDelayLimitInMinutes,omitempty"` + SkipLobColumns bool `json:"SkipLobColumns,omitempty"` + ValidationPartialLobSize int `json:"ValidationPartialLobSize,omitempty"` + ValidationQueryCdcDelaySeconds int `json:"ValidationQueryCdcDelaySeconds,omitempty"` + } `json:"ValidationSettings,omitempty"` +} + +func flattenSettings(apiObject string) string { + var taskSettings TaskSettings + + if err := json.Unmarshal([]byte(apiObject), &taskSettings); err != nil { + return apiObject + } + + if b, err := json.Marshal(&taskSettings); err != nil { + return apiObject + } else { + return string(b) + } +} From f46235636d1a666acb5caab2f8604a7491cc1d26 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 12:57:06 -0500 Subject: [PATCH 208/438] r/aws_finspace_kx_dataview: semgrep fix --- internal/service/finspace/kx_dataview.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/finspace/kx_dataview.go b/internal/service/finspace/kx_dataview.go index a269c6b1172..28e42c5fac6 100644 --- a/internal/service/finspace/kx_dataview.go +++ b/internal/service/finspace/kx_dataview.go @@ -375,7 +375,7 @@ func statusKxDataview(ctx context.Context, conn *finspace.Client, id string) ret } } -func expandDbPath(tfList []interface{}) []string { +func expandDBPath(tfList []interface{}) []string { if tfList == 
nil { return nil } @@ -397,7 +397,7 @@ func expandSegmentConfigurations(tfList []interface{}) []types.KxDataviewSegment m := v.(map[string]interface{}) s = append(s, types.KxDataviewSegmentConfiguration{ VolumeName: aws.String(m["volume_name"].(string)), - DbPaths: expandDbPath(m["db_paths"].([]interface{})), + DbPaths: expandDBPath(m["db_paths"].([]interface{})), }) } From 56bedf44ff4639874d9e166350e29376a1c7d667 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 12:58:09 -0500 Subject: [PATCH 209/438] r/aws_finspace_kx_dataview: importlint fix --- internal/service/finspace/kx_dataview.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/internal/service/finspace/kx_dataview.go b/internal/service/finspace/kx_dataview.go index 28e42c5fac6..07bc92180df 100644 --- a/internal/service/finspace/kx_dataview.go +++ b/internal/service/finspace/kx_dataview.go @@ -15,6 +15,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/enum" @@ -23,9 +25,6 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" ) // @SDKResource("aws_finspace_kx_dataview", name="Kx Dataview") From 95a4157290dc7ce51c8eeed999847bba618ef4aa Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 12:59:30 -0500 Subject: [PATCH 210/438] r/aws_finspace_kx_dataview(doc): 
fix title heading --- website/docs/r/finspace_kx_dataview.html.markdown | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/website/docs/r/finspace_kx_dataview.html.markdown b/website/docs/r/finspace_kx_dataview.html.markdown index f7f77da7742..44b77ca5aa9 100644 --- a/website/docs/r/finspace_kx_dataview.html.markdown +++ b/website/docs/r/finspace_kx_dataview.html.markdown @@ -3,10 +3,10 @@ subcategory: "FinSpace" layout: "aws" page_title: "AWS: aws_finspace_kx_dataview" description: |- - Terraform resource for managing an AWS FinSpace Kx Dataviewk. + Terraform resource for managing an AWS FinSpace Kx Dataview. --- -# Resource: aws_finspace_dataview +# Resource: aws_finspace_kx_dataview Terraform resource for managing an AWS FinSpace Kx Dataview. From d3ed118585662f0bbd6e2b377dd9d2713673ac0a Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 14 Dec 2023 18:46:26 +0000 Subject: [PATCH 211/438] Update CHANGELOG.md for #34915 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fce62da4221..f41aab55250 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,7 @@ FEATURES: * **New Data Source:** `aws_polly_voices` ([#34916](https://github.com/hashicorp/terraform-provider-aws/issues/34916)) * **New Data Source:** `aws_ssoadmin_application_assignments` ([#34796](https://github.com/hashicorp/terraform-provider-aws/issues/34796)) * **New Data Source:** `aws_ssoadmin_principal_application_assignments` ([#34815](https://github.com/hashicorp/terraform-provider-aws/issues/34815)) +* **New Resource:** `aws_finspace_kx_scaling_group` ([#34832](https://github.com/hashicorp/terraform-provider-aws/issues/34832)) * **New Resource:** `aws_ssoadmin_trusted_token_issuer` ([#34839](https://github.com/hashicorp/terraform-provider-aws/issues/34839)) ENHANCEMENTS: From 74cb6b501ba203ff0bc252beb59ac2e5f482e557 Mon Sep 17 00:00:00 2001 From: Tim Rogers Date: Tue, 16 May 2023 17:30:12 -0500 Subject: [PATCH 212/438] Added 
acceptance test for changing ALB stickiness type --- internal/service/elbv2/target_group_test.go | 93 +++++++++++++++++++++ 1 file changed, 93 insertions(+) diff --git a/internal/service/elbv2/target_group_test.go b/internal/service/elbv2/target_group_test.go index e1e6fba7ec3..5a5f81c053a 100644 --- a/internal/service/elbv2/target_group_test.go +++ b/internal/service/elbv2/target_group_test.go @@ -1520,6 +1520,99 @@ func TestAccELBV2TargetGroup_Stickiness_updateAppEnabled(t *testing.T) { }) } +func TestAccELBV2TargetGroup_Stickiness_updateStickinessType(t *testing.T) { + ctx := acctest.Context(t) + var conf elbv2.TargetGroup + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_lb_target_group.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, elbv2.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckTargetGroupDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccTargetGroupConfig_stickiness(rName, true, true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTargetGroupExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "port", "443"), + resource.TestCheckResourceAttr(resourceName, "protocol", "HTTPS"), + resource.TestCheckResourceAttrSet(resourceName, "vpc_id"), + resource.TestCheckResourceAttr(resourceName, "deregistration_delay", "200"), + resource.TestCheckResourceAttr(resourceName, "stickiness.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.type", "lb_cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_duration", "10000"), + resource.TestCheckResourceAttr(resourceName, "health_check.#", 
"1"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.path", "/health2"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.interval", "30"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.port", "8082"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.protocol", "HTTPS"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.timeout", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.healthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.unhealthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.matcher", "200"), + ), + }, + { + Config: testAccTargetGroupConfig_appStickiness(rName, true, true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTargetGroupExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "port", "443"), + resource.TestCheckResourceAttr(resourceName, "protocol", "HTTPS"), + resource.TestCheckResourceAttrSet(resourceName, "vpc_id"), + resource.TestCheckResourceAttr(resourceName, "deregistration_delay", "200"), + resource.TestCheckResourceAttr(resourceName, "stickiness.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.type", "app_cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_name", "Cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_duration", "10000"), + resource.TestCheckResourceAttr(resourceName, "health_check.#", "1"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.path", "/health2"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.interval", "30"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.port", "8082"), + 
resource.TestCheckResourceAttr(resourceName, "health_check.0.protocol", "HTTPS"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.timeout", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.healthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.unhealthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.matcher", "200"), + ), + }, + { + Config: testAccTargetGroupConfig_stickiness(rName, true, true), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckTargetGroupExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttrSet(resourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "port", "443"), + resource.TestCheckResourceAttr(resourceName, "protocol", "HTTPS"), + resource.TestCheckResourceAttrSet(resourceName, "vpc_id"), + resource.TestCheckResourceAttr(resourceName, "deregistration_delay", "200"), + resource.TestCheckResourceAttr(resourceName, "stickiness.#", "1"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.enabled", "true"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.type", "lb_cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_name", "Cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_duration", "10000"), + resource.TestCheckResourceAttr(resourceName, "health_check.#", "1"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.path", "/health2"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.interval", "30"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.port", "8082"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.protocol", "HTTPS"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.timeout", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.healthy_threshold", "4"), + 
resource.TestCheckResourceAttr(resourceName, "health_check.0.unhealthy_threshold", "4"), + resource.TestCheckResourceAttr(resourceName, "health_check.0.matcher", "200"), + ), + }, + }, + }) +} + func TestAccELBV2TargetGroup_HealthCheck_update(t *testing.T) { ctx := acctest.Context(t) var conf elbv2.TargetGroup From 668fcd10ffbec5f91144c406f02b4fba4b25ea6c Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 14 Dec 2023 14:28:50 -0500 Subject: [PATCH 213/438] elbv2: Move constants. --- internal/service/elbv2/const.go | 26 ++++++++++++++++++++++++++ internal/service/elbv2/target_group.go | 20 -------------------- 2 files changed, 26 insertions(+), 20 deletions(-) diff --git a/internal/service/elbv2/const.go b/internal/service/elbv2/const.go index ebd7a7df1d8..6ac49a89db8 100644 --- a/internal/service/elbv2/const.go +++ b/internal/service/elbv2/const.go @@ -3,6 +3,16 @@ package elbv2 +import ( + "time" + + "github.com/aws/aws-sdk-go/service/elbv2" +) + +const ( + propagationTimeout = 2 * time.Minute +) + const ( errCodeValidationError = "ValidationError" @@ -80,3 +90,19 @@ func httpXFFHeaderProcessingMode_Values() []string { httpXFFHeaderProcessingModeRemove, } } + +func healthCheckProtocolEnumValues() []string { + return []string{ + elbv2.ProtocolEnumHttp, + elbv2.ProtocolEnumHttps, + elbv2.ProtocolEnumTcp, + } +} + +func protocolVersionEnumValues() []string { + return []string{ + "GRPC", + "HTTP1", + "HTTP2", + } +} diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index 5985a022985..ef5132402b0 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -32,26 +32,6 @@ import ( "github.com/hashicorp/terraform-provider-aws/names" ) -const ( - propagationTimeout = 2 * time.Minute -) - -func healthCheckProtocolEnumValues() []string { - return []string{ - elbv2.ProtocolEnumHttp, - elbv2.ProtocolEnumHttps, - elbv2.ProtocolEnumTcp, - } -} - -func protocolVersionEnumValues() []string { - 
return []string{ - "GRPC", - "HTTP1", - "HTTP2", - } -} - // @SDKResource("aws_alb_target_group", name="Target Group") // @SDKResource("aws_lb_target_group", name="Target Group") // @Tags(identifierAttribute="id") From e455fcf278bfa0b3f9ae9b49886c264f3e9b287e Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 14 Dec 2023 14:36:56 -0500 Subject: [PATCH 214/438] 'RemoveReadOnlyFields' -> 'RemoveFields'. --- internal/json/remove.go | 8 ++++---- internal/json/remove_test.go | 4 ++-- internal/verify/json.go | 9 ++++----- 3 files changed, 10 insertions(+), 11 deletions(-) diff --git a/internal/json/remove.go b/internal/json/remove.go index cd22e993b2a..43233c5db2d 100644 --- a/internal/json/remove.go +++ b/internal/json/remove.go @@ -9,14 +9,14 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/json/ujson" ) -// RemoveReadOnlyFields removes read-only (can't be specified in configuration) fields from a valid JSON string. -func RemoveReadOnlyFields(in string, roFields ...string) string { +// RemoveFields removes the specified fields from a valid JSON string. +func RemoveFields(in string, fields ...string) string { out := make([]byte, 0, len(in)) err := ujson.Walk([]byte(in), func(_ int, key, value []byte) bool { if len(key) != 0 { - for _, roField := range roFields { - if bytes.Equal(key, []byte(roField)) { + for _, field := range fields { + if bytes.Equal(key, []byte(field)) { // Remove the key and value from the output. 
return false } diff --git a/internal/json/remove_test.go b/internal/json/remove_test.go index 8c5bc527251..541ee288a56 100644 --- a/internal/json/remove_test.go +++ b/internal/json/remove_test.go @@ -7,7 +7,7 @@ import ( "testing" ) -func TestRemoveReadOnlyFields(t *testing.T) { +func TestRemoveFields(t *testing.T) { t.Parallel() testCases := []struct { @@ -37,7 +37,7 @@ func TestRemoveReadOnlyFields(t *testing.T) { t.Run(testCase.testName, func(t *testing.T) { t.Parallel() - if got, want := RemoveReadOnlyFields(testCase.input, `"plugins"`), testCase.want; got != want { + if got, want := RemoveFields(testCase.input, `"plugins"`), testCase.want; got != want { t.Errorf("RemoveReadOnlyFields(%q) = %q, want %q", testCase.input, got, want) } }) diff --git a/internal/verify/json.go b/internal/verify/json.go index 1439938560f..4d1a43d6ce6 100644 --- a/internal/verify/json.go +++ b/internal/verify/json.go @@ -210,16 +210,15 @@ func LegacyPolicyToSet(exist, new string) (string, error) { return policyToSet, nil } -// SuppressEquivalentJSONRemovingReadOnlyFieldsDiffs returns a difference suppression function that compares -// two JSON strings and returns `true` if they are equivalent once read-only fields have been removed. -// Read-only fields are those that can't be specified in configuration (returned only from AWS API). -func SuppressEquivalentJSONRemovingReadOnlyFieldsDiffs(roFields ...string) schema.SchemaDiffSuppressFunc { +// SuppressEquivalentJSONRemovingFieldsDiffs returns a difference suppression function that compares +// two JSON strings and returns `true` if they are equivalent once the specified fields have been removed. +func SuppressEquivalentJSONRemovingFieldsDiffs(fields ...string) schema.SchemaDiffSuppressFunc { return func(k, old, new string, d *schema.ResourceData) bool { if !json.Valid([]byte(old)) || !json.Valid([]byte(new)) { return old == new } - old, new = tfjson.RemoveReadOnlyFields(old, roFields...), tfjson.RemoveReadOnlyFields(new, roFields...) 
+ old, new = tfjson.RemoveFields(old, fields...), tfjson.RemoveFields(new, fields...) return JSONStringsEqual(old, new) } From ce68ad0fe44a79a050128338633622d71e1d6135 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 14:43:29 -0500 Subject: [PATCH 215/438] r/aws_finspace_kx_dataview: linter fixes --- internal/service/finspace/kx_dataview.go | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/internal/service/finspace/kx_dataview.go b/internal/service/finspace/kx_dataview.go index 07bc92180df..9618cd46bc3 100644 --- a/internal/service/finspace/kx_dataview.go +++ b/internal/service/finspace/kx_dataview.go @@ -30,7 +30,6 @@ import ( // @SDKResource("aws_finspace_kx_dataview", name="Kx Dataview") // @Tags(identifierAttribute="arn") func ResourceKxDataview() *schema.Resource { - return &schema.Resource{ CreateWithoutTimeout: resourceKxDataviewCreate, ReadWithoutTimeout: resourceKxDataviewRead, @@ -235,7 +234,7 @@ func resourceKxDataviewUpdate(ctx context.Context, d *schema.ResourceData, meta ClientToken: aws.String(id.UniqueId()), } - if v, ok := d.GetOk("changeset_id"); ok && d.HasChange("changeset_id") && d.Get("auto_update").(bool) != true { + if v, ok := d.GetOk("changeset_id"); ok && d.HasChange("changeset_id") && !d.Get("auto_update").(bool) { in.ChangesetId = aws.String(v.(string)) } @@ -300,7 +299,6 @@ func FindKxDataviewById(ctx context.Context, conn *finspace.Client, id string) ( LastError: err, LastRequest: in, } - } return nil, err } From 0c613614ef8bf613143cc00547214a37123c9fae Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 15:00:52 -0500 Subject: [PATCH 216/438] r/aws_finspace_kx_cluster: prefer create.AppendDiagError --- internal/service/finspace/kx_cluster.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/finspace/kx_cluster.go b/internal/service/finspace/kx_cluster.go index 6a6f26833e2..e00b091ffb4 100644 --- a/internal/service/finspace/kx_cluster.go +++ 
b/internal/service/finspace/kx_cluster.go @@ -589,11 +589,11 @@ func resourceKxClusterRead(ctx context.Context, d *schema.ResourceData, meta int } if err := d.Set("scaling_group_configuration", flattenScalingGroupConfiguration(out.ScalingGroupConfiguration)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxCluster, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxCluster, d.Id(), err) } if err := d.Set("tickerplant_log_configuration", flattenTickerplantLogConfiguration(out.TickerplantLogConfiguration)); err != nil { - return append(diags, create.DiagError(names.FinSpace, create.ErrActionSetting, ResNameKxCluster, d.Id(), err)...) + return create.AppendDiagError(diags, names.FinSpace, create.ErrActionSetting, ResNameKxCluster, d.Id(), err) } // compose cluster ARN using environment ARN From 25339163223498e9188b4a9e108646f6e5861d60 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 15:03:03 -0500 Subject: [PATCH 217/438] r/aws_finspace_kx_cluster(doc): markdownlint fixes --- website/docs/r/finspace_kx_cluster.html.markdown | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/website/docs/r/finspace_kx_cluster.html.markdown b/website/docs/r/finspace_kx_cluster.html.markdown index f7d59e6d71a..13b06c8e474 100644 --- a/website/docs/r/finspace_kx_cluster.html.markdown +++ b/website/docs/r/finspace_kx_cluster.html.markdown @@ -93,7 +93,7 @@ The following arguments are optional: * `initialization_script` - (Optional) Path to Q program that will be run at launch of a cluster. This is a relative path within .zip file that contains the custom code, which will be loaded on the cluster. It must include the file name itself. For example, somedir/init.q. * `savedown_storage_configuration` - (Optional) Size and type of the temporary storage that is used to hold data during the savedown process. 
This parameter is required when you choose `type` as RDB. All the data written to this storage space is lost when the cluster node is restarted. See [savedown_storage_configuration](#savedown_storage_configuration). * `scaling_group_configuration` - (Optional) The structure that stores the configuration details of a scaling group. -* `tickerplant_log_configuration` - A configuration to store Tickerplant logs. It consists of a list of volumes that will be mounted to your cluster. For the cluster type Tickerplant , the location of the TP volume on the cluster will be available by using the global variable .aws.tp_log_path. +* `tickerplant_log_configuration` - A configuration to store Tickerplant logs. It consists of a list of volumes that will be mounted to your cluster. For the cluster type Tickerplant , the location of the TP volume on the cluster will be available by using the global variable .aws.tp_log_path. * `tags` - (Optional) Key-value mapping of resource tags. If configured with a provider [`default_tags` configuration block](/docs/providers/aws/index.html#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. ### auto_scaling_configuration @@ -151,7 +151,7 @@ The database block supports the following arguments: * `database_name` - (Required) Name of the KX database. * `cache_configurations` - (Optional) Configuration details for the disk cache to increase performance reading from a KX database mounted to the cluster. See [cache_configurations](#cache_configurations). * `changeset_id` - (Optional) A unique identifier of the changeset that is associated with the cluster. -* `dataview_name` - (Optional) The name of the dataview to be used for caching historical data on disk. You cannot update to a different dataview name once a cluster is created. 
Use `lifecycle` [`ignore_changes`](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) for database to prevent any undesirable behaviors. +* `dataview_name` - (Optional) The name of the dataview to be used for caching historical data on disk. You cannot update to a different dataview name once a cluster is created. Use `lifecycle` [`ignore_changes`](https://www.terraform.io/docs/configuration/meta-arguments/lifecycle.html#ignore_changes) for database to prevent any undesirable behaviors. #### cache_configurations @@ -184,7 +184,7 @@ The vpc_configuration block supports the following arguments: * `memory_reservation` - (Required) A reservation of the minimum amount of memory that should be available on the scaling group for a kdb cluster to be successfully placed in a scaling group. * `node_count` - (Required) The number of kdb cluster nodes. * `cpu` - The number of vCPUs that you want to reserve for each node of this kdb cluster on the scaling group host. -* `memory_limit` - An optional hard limit on the amount of memory a kdb cluster can use. +* `memory_limit` - An optional hard limit on the amount of memory a kdb cluster can use. 
### tickerplant_log_configuration From eaa4f07d225476dd86cb46e10e231e840da4ea7c Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Thu, 14 Dec 2023 14:04:12 -0600 Subject: [PATCH 218/438] aws_ecr_image: use defined Find func --- internal/service/ecr/image_data_source.go | 20 ++++++++------------ 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/internal/service/ecr/image_data_source.go b/internal/service/ecr/image_data_source.go index 96bf9df4910..42ee4ff20d1 100644 --- a/internal/service/ecr/image_data_source.go +++ b/internal/service/ecr/image_data_source.go @@ -5,6 +5,7 @@ package ecr import ( "context" + "fmt" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/ecr" @@ -133,29 +134,24 @@ func dataSourceImageRead(ctx context.Context, d *schema.ResourceData, meta inter imageDetail := imageDetails[0] - params2 := &ecr.DescribeRepositoriesInput{ - RepositoryNames: []*string{imageDetail.RepositoryName}, + repositoryName := aws.StringValue(imageDetail.RepositoryName) + repositoryInput := &ecr.DescribeRepositoriesInput{ + RepositoryNames: aws.StringSlice([]string{repositoryName}), RegistryId: imageDetail.RegistryId, } - var repositoryDetails []*ecr.Repository - err2 := conn.DescribeRepositoriesPages(params2, func(page *ecr.DescribeRepositoriesOutput, lastPage bool) bool { - repositoryDetails = append(repositoryDetails, page.Repositories...) 
- return true - }) + repository, err := FindRepository(ctx, conn, repositoryInput) - if err2 != nil { - return sdkdiag.AppendErrorf(diags, "reading ECR repositories: %s", err) + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading ECR Images: %s", err) } - repository := repositoryDetails[0] - d.SetId(aws.StringValue(imageDetail.ImageDigest)) d.Set("image_digest", imageDetail.ImageDigest) d.Set("image_pushed_at", imageDetail.ImagePushedAt.Unix()) d.Set("image_size_in_bytes", imageDetail.ImageSizeInBytes) d.Set("image_tags", aws.StringValueSlice(imageDetail.ImageTags)) - d.Set("image_uri", aws.String(aws.StringValue(repository.RepositoryUri)+"@"+aws.StringValue(imageDetail.ImageDigest))) + d.Set("image_uri", fmt.Sprintf("%s@%s", aws.StringValue(repository.RepositoryUri), aws.StringValue(imageDetail.ImageDigest))) d.Set("registry_id", imageDetail.RegistryId) d.Set("repository_name", imageDetail.RepositoryName) From 9f3f49b57d99d4a4fdaf7e18909f36c7799437b0 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 15:06:39 -0500 Subject: [PATCH 219/438] chore: changelog --- .changelog/34831.txt | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 .changelog/34831.txt diff --git a/.changelog/34831.txt b/.changelog/34831.txt new file mode 100644 index 00000000000..e7c0098c35a --- /dev/null +++ b/.changelog/34831.txt @@ -0,0 +1,6 @@ +```release-note:enhancement +resource/aws_finspace_kx_cluster: Add `database.dataview_name`, `scaling_group_configuration`, and `tickerplant_log_configuration` arguments. +``` +```release-note:enhancement +resource/aws_finspace_kx_cluster: The `capacity_configuration` argument is now optional. +``` From 81c0e43bf4575371000fb65a5e8df4bee5ed4a64 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 14 Dec 2023 15:17:43 -0500 Subject: [PATCH 220/438] r/aws_lb_target_group: Tidy up Delete. 
--- internal/service/elbv2/const.go | 4 +++ internal/service/elbv2/target_group.go | 37 +++++++------------------- 2 files changed, 13 insertions(+), 28 deletions(-) diff --git a/internal/service/elbv2/const.go b/internal/service/elbv2/const.go index 6ac49a89db8..111a320a0eb 100644 --- a/internal/service/elbv2/const.go +++ b/internal/service/elbv2/const.go @@ -91,6 +91,10 @@ func httpXFFHeaderProcessingMode_Values() []string { } } +const ( + healthCheckPortTrafficPort = "traffic-port" +) + func healthCheckProtocolEnumValues() []string { return []string{ elbv2.ProtocolEnumHttp, diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index ef5132402b0..1f0777d28a2 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -114,7 +114,7 @@ func ResourceTargetGroup() *schema.Resource { "port": { Type: schema.TypeString, Optional: true, - Default: "traffic-port", + Default: healthCheckPortTrafficPort, ValidateFunc: validTargetGroupHealthCheckPort, DiffSuppressFunc: suppressIfTargetType(elbv2.TargetTypeEnumLambda), }, @@ -877,36 +877,17 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta func resourceTargetGroupDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - const ( - targetGroupDeleteTimeout = 2 * time.Minute - ) conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) - input := &elbv2.DeleteTargetGroupInput{ - TargetGroupArn: aws.String(d.Id()), - } - - log.Printf("[DEBUG] Deleting Target Group (%s): %s", d.Id(), input) - err := retry.RetryContext(ctx, targetGroupDeleteTimeout, func() *retry.RetryError { - _, err := conn.DeleteTargetGroupWithContext(ctx, input) - - if tfawserr.ErrMessageContains(err, "ResourceInUse", "is currently in use by a listener or a rule") { - return retry.RetryableError(err) - } - - if err != nil { - return retry.NonRetryableError(err) - } - - return nil - }) - - if 
tfresource.TimedOut(err) { - _, err = conn.DeleteTargetGroupWithContext(ctx, input) - } + log.Printf("[DEBUG] Deleting ELBv2 Target Group: %s", d.Id()) + _, err := tfresource.RetryWhenAWSErrMessageContains(ctx, 2*time.Minute, func() (interface{}, error) { + return conn.DeleteTargetGroupWithContext(ctx, &elbv2.DeleteTargetGroupInput{ + TargetGroupArn: aws.String(d.Id()), + }) + }, elbv2.ErrCodeResourceInUseException, "is currently in use by a listener or a rule") if err != nil { - return sdkdiag.AppendErrorf(diags, "deleting Target Group: %s", err) + return sdkdiag.AppendErrorf(diags, "deleting ELBv2 Target Group (%s): %s", d.Id(), err) } return diags @@ -1028,7 +1009,7 @@ func validateSlowStart(v interface{}, k string) (ws []string, errors []error) { func validTargetGroupHealthCheckPort(v interface{}, k string) (ws []string, errors []error) { value := v.(string) - if value == "traffic-port" { + if value == healthCheckPortTrafficPort { return } From ddce4ea724808ac1d12ab021d10d31083e127892 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 14 Dec 2023 15:22:41 -0500 Subject: [PATCH 221/438] Tidy up 'findTargetGroup'. 
--- internal/service/elbv2/target_group.go | 38 ++++++++----------- .../service/elbv2/target_group_data_source.go | 2 +- 2 files changed, 16 insertions(+), 24 deletions(-) diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index 1f0777d28a2..48c0151be64 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -365,7 +365,7 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta create.WithConfiguredPrefix(d.Get("name_prefix").(string)), create.WithDefaultPrefix("tf-"), ).Generate() - exist, err := FindTargetGroupByName(ctx, conn, name) + exist, err := findTargetGroupByName(ctx, conn, name) if err != nil && !tfresource.NotFound(err) { return sdkdiag.AppendErrorf(diags, "reading ELBv2 Target Group (%s): %s", name, err) @@ -898,7 +898,7 @@ func FindTargetGroupByARN(ctx context.Context, conn *elbv2.ELBV2, arn string) (* TargetGroupArns: aws.StringSlice([]string{arn}), } - output, err := FindTargetGroup(ctx, conn, input) + output, err := findTargetGroup(ctx, conn, input) if err != nil { return nil, err @@ -914,12 +914,12 @@ func FindTargetGroupByARN(ctx context.Context, conn *elbv2.ELBV2, arn string) (* return output, nil } -func FindTargetGroupByName(ctx context.Context, conn *elbv2.ELBV2, name string) (*elbv2.TargetGroup, error) { +func findTargetGroupByName(ctx context.Context, conn *elbv2.ELBV2, name string) (*elbv2.TargetGroup, error) { input := &elbv2.DescribeTargetGroupsInput{ Names: aws.StringSlice([]string{name}), } - output, err := FindTargetGroup(ctx, conn, input) + output, err := findTargetGroup(ctx, conn, input) if err != nil { return nil, err @@ -935,7 +935,17 @@ func FindTargetGroupByName(ctx context.Context, conn *elbv2.ELBV2, name string) return output, nil } -func FindTargetGroups(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTargetGroupsInput) ([]*elbv2.TargetGroup, error) { +func findTargetGroup(ctx context.Context, conn 
*elbv2.ELBV2, input *elbv2.DescribeTargetGroupsInput) (*elbv2.TargetGroup, error) { + output, err := findTargetGroups(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSinglePtrResult(output) +} + +func findTargetGroups(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTargetGroupsInput) ([]*elbv2.TargetGroup, error) { var output []*elbv2.TargetGroup err := conn.DescribeTargetGroupsPagesWithContext(ctx, input, func(page *elbv2.DescribeTargetGroupsOutput, lastPage bool) bool { @@ -966,24 +976,6 @@ func FindTargetGroups(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.Descr return output, nil } -func FindTargetGroup(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTargetGroupsInput) (*elbv2.TargetGroup, error) { - output, err := FindTargetGroups(ctx, conn, input) - - if err != nil { - return nil, err - } - - if len(output) == 0 || output[0] == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - if count := len(output); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) - } - - return output[0], nil -} - func validTargetGroupHealthCheckPath(v interface{}, k string) (ws []string, errors []error) { value := v.(string) if !strings.HasPrefix(value, "/") { diff --git a/internal/service/elbv2/target_group_data_source.go b/internal/service/elbv2/target_group_data_source.go index e8ecfe7e5d5..1dd94c9293c 100644 --- a/internal/service/elbv2/target_group_data_source.go +++ b/internal/service/elbv2/target_group_data_source.go @@ -184,7 +184,7 @@ func dataSourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta input.Names = aws.StringSlice([]string{v.(string)}) } - results, err := FindTargetGroups(ctx, conn, input) + results, err := findTargetGroups(ctx, conn, input) if err != nil { return sdkdiag.AppendErrorf(diags, "reading ELBv2 Target Groups: %s", err) From 3ae4ffea43f50fc7a66c1c12bea020edd667466b Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Thu, 14 Dec 
2023 14:37:30 -0600 Subject: [PATCH 222/438] aws_ecr_image: find most recent --- internal/service/ecr/image_data_source.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/internal/service/ecr/image_data_source.go b/internal/service/ecr/image_data_source.go index 42ee4ff20d1..8932629b1c0 100644 --- a/internal/service/ecr/image_data_source.go +++ b/internal/service/ecr/image_data_source.go @@ -102,6 +102,18 @@ func dataSourceImageRead(ctx context.Context, d *schema.ResourceData, meta inter } } + if v, ok := d.Get("most_recent").(bool); ok && v { + if len(input.ImageIds) == 0 { + input.ImageIds = []*ecr.ImageIdentifier{ + { + ImageTag: aws.String("latest"), + }, + } + } else { + input.ImageIds[0].ImageTag = aws.String("latest") + } + } + if v, ok := d.GetOk("registry_id"); ok { input.RegistryId = aws.String(v.(string)) } From 434d9b4c3a25b528e2b158ce0703c340e76a0bed Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Thu, 14 Dec 2023 14:45:32 -0600 Subject: [PATCH 223/438] tweak CHANGELOG entry --- .changelog/24526.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changelog/24526.txt b/.changelog/24526.txt index 7b392ba8fb6..92988183f15 100644 --- a/.changelog/24526.txt +++ b/.changelog/24526.txt @@ -1,3 +1,3 @@ ```release-note:enhancement -data-source/aws_ecr_image: Add image_uri attribute +data-source/aws_ecr_image: Add `image_uri` attribute ``` \ No newline at end of file From 3fcd57237499d4be9e4398debe680c127e6a0318 Mon Sep 17 00:00:00 2001 From: Daniel Quackenbush <25692880+danquack@users.noreply.github.com> Date: Thu, 14 Dec 2023 15:40:57 -0500 Subject: [PATCH 224/438] Add eks properties --- internal/service/batch/eks_properties.go | 358 ++++++++++++++++++ internal/service/batch/job_definition.go | 260 ++++++++++++- internal/service/batch/job_definition_test.go | 174 +++++++++ .../r/batch_job_definition.html.markdown | 2 +- .../r/batch_job_definition.html.markdown | 2 +- .../docs/r/batch_job_definition.html.markdown | 79 +++- 
6 files changed, 866 insertions(+), 9 deletions(-) create mode 100644 internal/service/batch/eks_properties.go diff --git a/internal/service/batch/eks_properties.go b/internal/service/batch/eks_properties.go new file mode 100644 index 00000000000..fd26900cd1a --- /dev/null +++ b/internal/service/batch/eks_properties.go @@ -0,0 +1,358 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package batch + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/batch" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-provider-aws/internal/flex" +) + +func expandEKSPodProperties(podPropsMap map[string]interface{}) *batch.EksPodProperties { + podProps := &batch.EksPodProperties{} + + if v, ok := podPropsMap["containers"]; ok { + containers := v.([]interface{}) + podProps.Containers = expandContainers(containers) + } + + if v, ok := podPropsMap["dns_policy"].(string); ok && v != "" { + podProps.DnsPolicy = aws.String(v) + } + + if v, ok := podPropsMap["host_network"]; ok { + podProps.HostNetwork = aws.Bool(v.(bool)) + } + if m, ok := podPropsMap["metadata"].([]interface{}); ok && len(m) > 0 { + if v, ok := m[0].(map[string]interface{})["labels"]; ok { + podProps.Metadata = &batch.EksMetadata{} + podProps.Metadata.Labels = flex.ExpandStringMap(v.(map[string]interface{})) + } + } + if v, ok := podPropsMap["service_account_name"].(string); ok && v != "" { + podProps.ServiceAccountName = aws.String(v) + } + if v, ok := podPropsMap["volumes"]; ok { + podProps.Volumes = expandVolumes(v.([]interface{})) + } + + return podProps +} + +func expandContainers(containers []interface{}) []*batch.EksContainer { + var result []*batch.EksContainer + + for _, v := range containers { + containerMap := v.(map[string]interface{}) + container := &batch.EksContainer{} + + if v, ok := containerMap["args"]; ok { + container.Args = flex.ExpandStringList(v.([]interface{})) + } + + if v, ok := 
containerMap["command"]; ok { + container.Command = flex.ExpandStringList(v.([]interface{})) + } + + if v, ok := containerMap["env"].(*schema.Set); ok && v.Len() > 0 { + env := []*batch.EksContainerEnvironmentVariable{} + for _, e := range v.List() { + environment := &batch.EksContainerEnvironmentVariable{} + environ := e.(map[string]interface{}) + if v, ok := environ["name"].(string); ok && v != "" { + environment.Name = aws.String(v) + } + if v, ok := environ["value"].(string); ok && v != "" { + environment.Value = aws.String(v) + } + env = append(env, environment) + } + container.Env = env + } + + if v, ok := containerMap["image"]; ok { + container.Image = aws.String(v.(string)) + } + if v, ok := containerMap["image_pull_policy"].(string); ok && v != "" { + container.ImagePullPolicy = aws.String(v) + } + + if v, ok := containerMap["name"].(string); ok && v != "" { + container.Name = aws.String(v) + } + if r, ok := containerMap["resources"].([]interface{}); ok && len(r) > 0 { + resources := &batch.EksContainerResourceRequirements{} + res := r[0].(map[string]interface{}) + if v, ok := res["limits"]; ok { + resources.Limits = flex.ExpandStringMap(v.(map[string]interface{})) + } + if v, ok := res["requests"]; ok { + resources.Requests = flex.ExpandStringMap(v.(map[string]interface{})) + } + container.Resources = resources + } + + if s, ok := containerMap["security_context"].([]interface{}); ok && len(s) > 0 { + securityContext := &batch.EksContainerSecurityContext{} + security := s[0].(map[string]interface{}) + if v, ok := security["privileged"]; ok { + securityContext.Privileged = aws.Bool(v.(bool)) + } + if v, ok := security["run_as_user"]; ok { + securityContext.RunAsUser = aws.Int64(int64(v.(int))) + } + if v, ok := security["run_as_group"]; ok { + securityContext.RunAsGroup = aws.Int64(int64(v.(int))) + } + if v, ok := security["read_only_root_file_system"]; ok { + securityContext.ReadOnlyRootFilesystem = aws.Bool(v.(bool)) + } + if v, ok := 
security["run_as_non_root"]; ok { + securityContext.RunAsNonRoot = aws.Bool(v.(bool)) + } + container.SecurityContext = securityContext + } + if v, ok := containerMap["volume_mounts"]; ok { + container.VolumeMounts = expandVolumeMounts(v.([]interface{})) + } + + result = append(result, container) + } + + return result +} + +func expandVolumes(volumes []interface{}) []*batch.EksVolume { + var result []*batch.EksVolume + for _, v := range volumes { + volume := &batch.EksVolume{} + volumeMap := v.(map[string]interface{}) + if v, ok := volumeMap["name"].(string); ok { + volume.Name = aws.String(v) + } + if e, ok := volumeMap["empty_dir"].([]interface{}); ok && len(e) > 0 { + if empty, ok := e[0].(map[string]interface{}); ok { + volume.EmptyDir = &batch.EksEmptyDir{ + Medium: aws.String(empty["medium"].(string)), + SizeLimit: aws.String(empty["size_limit"].(string)), + } + } + } + if h, ok := volumeMap["host_path"].([]interface{}); ok && len(h) > 0 { + volume.HostPath = &batch.EksHostPath{} + if host, ok := h[0].(map[string]interface{}); ok { + if v, ok := host["path"]; ok { + volume.HostPath.Path = aws.String(v.(string)) + } + } + } + if s, ok := volumeMap["secret"].([]interface{}); ok && len(s) > 0 { + volume.Secret = &batch.EksSecret{} + if secret := s[0].(map[string]interface{}); ok { + if v, ok := secret["secret_name"]; ok { + volume.Secret.SecretName = aws.String(v.(string)) + } + if v, ok := secret["optional"]; ok { + volume.Secret.Optional = aws.Bool(v.(bool)) + } + } + } + result = append(result, volume) + } + + return result +} + +func expandVolumeMounts(volumeMounts []interface{}) []*batch.EksContainerVolumeMount { + var result []*batch.EksContainerVolumeMount + for _, v := range volumeMounts { + volumeMount := &batch.EksContainerVolumeMount{} + volumeMountMap := v.(map[string]interface{}) + if v, ok := volumeMountMap["name"]; ok { + volumeMount.Name = aws.String(v.(string)) + } + if v, ok := volumeMountMap["mount_path"]; ok { + volumeMount.MountPath = 
aws.String(v.(string)) + } + if v, ok := volumeMountMap["read_only"]; ok { + volumeMount.ReadOnly = aws.Bool(v.(bool)) + } + result = append(result, volumeMount) + } + + return result +} + +func flattenEKSProperties(eksProperties *batch.EksProperties) []interface{} { + var eksPropertiesList []interface{} + if eksProperties == nil { + return eksPropertiesList + } + if v := eksProperties.PodProperties; v != nil { + eksPropertiesList = append(eksPropertiesList, map[string]interface{}{ + "pod_properties": flattenEKSPodProperties(eksProperties.PodProperties), + }) + } + + return eksPropertiesList +} + +func flattenEKSPodProperties(podProperties *batch.EksPodProperties) (tfList []interface{}) { + tfMap := make(map[string]interface{}, 0) + if v := podProperties.Containers; v != nil { + tfMap["containers"] = flattenEKSContainers(v) + } + + if v := podProperties.DnsPolicy; v != nil { + tfMap["dns_policy"] = aws.StringValue(v) + } + + if v := podProperties.HostNetwork; v != nil { + tfMap["host_network"] = aws.BoolValue(v) + } + + if v := podProperties.Metadata; v != nil { + metaData := make([]map[string]interface{}, 0) + if v := v.Labels; v != nil { + metaData = append(metaData, map[string]interface{}{"labels": flex.FlattenStringMap(v)}) + } + tfMap["metadata"] = metaData + } + + if v := podProperties.ServiceAccountName; v != nil { + tfMap["service_account_name"] = aws.StringValue(v) + } + + if v := podProperties.Volumes; v != nil { + tfMap["volumes"] = flattenEKSVolumes(v) + } + + tfList = append(tfList, tfMap) + return tfList +} +func flattenEKSContainers(containers []*batch.EksContainer) (tfList []interface{}) { + for _, container := range containers { + tfMap := map[string]interface{}{} + + if v := container.Args; v != nil { + tfMap["args"] = flex.FlattenStringList(v) + } + + if v := container.Command; v != nil { + tfMap["command"] = flex.FlattenStringList(v) + } + + if v := container.Env; v != nil { + tfMap["env"] = flattenEKSContainerEnvironmentVariables(v) + } + + if 
v := container.Image; v != nil { + tfMap["image"] = aws.StringValue(v) + } + + if v := container.ImagePullPolicy; v != nil { + tfMap["image_pull_policy"] = aws.StringValue(v) + } + + if v := container.Name; v != nil { + tfMap["name"] = aws.StringValue(v) + } + + if v := container.Resources; v != nil { + tfMap["resources"] = []map[string]interface{}{{ + "limits": flex.FlattenStringMap(v.Limits), + "requests": flex.FlattenStringMap(v.Requests), + }} + } + + if v := container.SecurityContext; v != nil { + tfMap["security_context"] = []map[string]interface{}{{ + "privileged": aws.BoolValue(v.Privileged), + "run_as_user": aws.Int64Value(v.RunAsUser), + "run_as_group": aws.Int64Value(v.RunAsGroup), + "read_only_root_file_system": aws.BoolValue(v.ReadOnlyRootFilesystem), + "run_as_non_root": aws.BoolValue(v.RunAsNonRoot), + }} + } + + if v := container.VolumeMounts; v != nil { + tfMap["volume_mounts"] = flattenEKSContainerVolumeMounts(v) + } + tfList = append(tfList, tfMap) + } + + return tfList +} + +func flattenEKSContainerEnvironmentVariables(env []*batch.EksContainerEnvironmentVariable) (tfList []interface{}) { + for _, e := range env { + tfMap := map[string]interface{}{} + + if v := e.Name; v != nil { + tfMap["name"] = aws.StringValue(v) + } + + if v := e.Value; v != nil { + tfMap["value"] = aws.StringValue(v) + } + tfList = append(tfList, tfMap) + } + + return tfList +} + +func flattenEKSContainerVolumeMounts(volumeMounts []*batch.EksContainerVolumeMount) (tfList []interface{}) { + for _, v := range volumeMounts { + tfMap := map[string]interface{}{} + + if v := v.Name; v != nil { + tfMap["name"] = aws.StringValue(v) + } + + if v := v.MountPath; v != nil { + tfMap["mount_path"] = aws.StringValue(v) + } + + if v := v.ReadOnly; v != nil { + tfMap["read_only"] = aws.BoolValue(v) + } + tfList = append(tfList, tfMap) + } + + return tfList +} + +func flattenEKSVolumes(volumes []*batch.EksVolume) (tfList []interface{}) { + for _, v := range volumes { + tfMap := 
map[string]interface{}{} + + if v := v.Name; v != nil { + tfMap["name"] = aws.StringValue(v) + } + + if v := v.EmptyDir; v != nil { + tfMap["empty_dir"] = []map[string]interface{}{{ + "medium": aws.StringValue(v.Medium), + "size_limit": aws.StringValue(v.SizeLimit), + }} + } + + if v := v.HostPath; v != nil { + tfMap["host_path"] = []map[string]interface{}{{ + "path": aws.StringValue(v.Path), + }} + } + + if v := v.Secret; v != nil { + tfMap["secret"] = []map[string]interface{}{{ + "secret_name": aws.StringValue(v.SecretName), + "optional": aws.BoolValue(v.Optional), + }} + } + tfList = append(tfList, tfMap) + } + + return tfList +} diff --git a/internal/service/batch/job_definition.go b/internal/service/batch/job_definition.go index ae68a3c33fc..d7386a1a169 100644 --- a/internal/service/batch/job_definition.go +++ b/internal/service/batch/job_definition.go @@ -48,9 +48,10 @@ func ResourceJobDefinition() *schema.Resource { Computed: true, }, "container_properties": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"eks_properties"}, StateFunc: func(v interface{}) string { json, _ := structure.NormalizeJsonString(v) return json @@ -69,9 +70,9 @@ func ResourceJobDefinition() *schema.Resource { ValidateFunc: validName, }, "node_properties": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, + Type: schema.TypeString, + Optional: true, + ConflictsWith: []string{"eks_properties"}, StateFunc: func(v interface{}) string { json, _ := structure.NormalizeJsonString(v) return json @@ -82,6 +83,235 @@ func ResourceJobDefinition() *schema.Resource { }, ValidateFunc: validJobNodeProperties, }, + "eks_properties": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + ConflictsWith: []string{"container_properties", "node_properties"}, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "pod_properties": { + Type: schema.TypeList, + MaxItems: 1, 
+ Required: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "containers": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "args": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "command": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "env": { + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + }, + "value": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "image": { + Type: schema.TypeString, + Required: true, + }, + "image_pull_policy": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{ + "Always", + "IfNotPresent", + "Never", + }, false), + }, + "name": { + Type: schema.TypeString, + Optional: true, + }, + "resources": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "limits": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "requests": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "security_context": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "privileged": { + Type: schema.TypeBool, + Optional: true, + }, + "read_only_root_file_system": { + Type: schema.TypeBool, + Optional: true, + }, + "run_as_group": { + Type: schema.TypeInt, + Optional: true, + }, + "run_as_non_root": { + Type: schema.TypeBool, + Optional: true, + }, + "run_as_user": { + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, + "volume_mounts": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: 
map[string]*schema.Schema{ + "mount_path": { + Type: schema.TypeString, + Required: true, + }, + "name": { + Type: schema.TypeString, + Required: true, + }, + "read_only": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + "dns_policy": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice([]string{"Default", "ClusterFirst", "ClusterFirstWithHostNet"}, false), + }, + "host_network": { + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "metadata": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "labels": { + Type: schema.TypeMap, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "service_account_name": { + Type: schema.TypeString, + Optional: true, + }, + "volumes": { + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "empty_dir": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "medium": { + Type: schema.TypeString, + Optional: true, + Default: "", + ValidateFunc: validation.StringInSlice([]string{"", "Memory"}, true), + }, + "size_limit": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "host_path": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "path": { + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Optional: true, + Default: "Default", + }, + "secret": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret_name": { + Type: schema.TypeString, + Required: true, + }, + "optional": { + Type: schema.TypeBool, + Optional: true, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, "parameters": { Type: 
schema.TypeMap, Optional: true, @@ -229,12 +459,26 @@ func resourceJobDefinitionCreate(ctx context.Context, d *schema.ResourceData, me input.ContainerProperties = props } } + if v, ok := d.GetOk("eks_properties"); ok && len(v.([]interface{})) > 0 { + eksProps := v.([]interface{})[0].(map[string]interface{}) + if podProps, ok := eksProps["pod_properties"].([]interface{}); ok && len(podProps) > 0 { + if aws.StringValue(input.Type) == batch.JobDefinitionTypeContainer { + props := expandEKSPodProperties(podProps[0].(map[string]interface{})) + input.EksProperties = &batch.EksProperties{ + PodProperties: props, + } + } + } + } } if jobDefinitionType == batch.JobDefinitionTypeMultinode { if v, ok := d.GetOk("container_properties"); ok && v != nil { return sdkdiag.AppendErrorf(diags, "No `container_properties` can be specified when `type` is %q", jobDefinitionType) } + if v, ok := d.GetOk("eks_properties"); ok && v != nil { + return sdkdiag.AppendErrorf(diags, "No `eks_properties` can be specified when `type` is %q", jobDefinitionType) + } if v, ok := d.GetOk("node_properties"); ok { props, err := expandJobNodeProperties(v.(string)) @@ -314,6 +558,10 @@ func resourceJobDefinitionRead(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "setting node_properties: %s", err) } + if err := d.Set("eks_properties", flattenEKSProperties(jobDefinition.EksProperties)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting eks_properties: %s", err) + } + d.Set("name", jobDefinition.JobDefinitionName) d.Set("parameters", aws.StringValueMap(jobDefinition.Parameters)) d.Set("platform_capabilities", aws.StringValueSlice(jobDefinition.PlatformCapabilities)) diff --git a/internal/service/batch/job_definition_test.go b/internal/service/batch/job_definition_test.go index fd997a8627e..4b27734f041 100644 --- a/internal/service/batch/job_definition_test.go +++ b/internal/service/batch/job_definition_test.go @@ -567,6 +567,77 @@ func 
TestAccBatchJobDefinition_NodePropertiesupdateForcesNewResource(t *testing. }) } +func TestAccBatchJobDefinition_EKSProperties_basic(t *testing.T) { + ctx := acctest.Context(t) + var jd batch.JobDefinition + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_batch_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccJobDefinitionConfig_EKSProperties_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckJobDefinitionExists(ctx, resourceName, &jd), + resource.TestCheckResourceAttr(resourceName, "eks_properties.0.pod_properties.0.containers.#", "1"), + resource.TestCheckResourceAttr(resourceName, "eks_properties.0.pod_properties.0.containers.0.image_pull_policy", ""), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "type", "container"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{ + "deregister_on_new_revision", + }, + }, + }, + }) +} +func TestAccBatchJobDefinition_EKSProperties_update(t *testing.T) { + ctx := acctest.Context(t) + var jd batch.JobDefinition + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_batch_job_definition.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccJobDefinitionConfig_EKSProperties_basic(rName), + 
Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckJobDefinitionExists(ctx, resourceName, &jd), + ), + }, + { + Config: testAccJobDefinitionConfig_EKSProperties_advancedUpdate(rName), + Check: resource.ComposeAggregateTestCheckFunc( + testAccCheckJobDefinitionExists(ctx, resourceName, &jd), + resource.TestCheckResourceAttr(resourceName, "eks_properties.0.pod_properties.0.containers.#", "1"), + resource.TestCheckResourceAttr(resourceName, "eks_properties.0.pod_properties.0.containers.0.image_pull_policy", "Always"), + resource.TestCheckResourceAttr(resourceName, "eks_properties.0.pod_properties.0.volumes.0.name", "tmp"), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "type", "container"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccBatchJobDefinition_createTypeContainerWithBothProperties(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1205,6 +1276,109 @@ resource "aws_batch_job_definition" "test" { `, rName) } +func testAccJobDefinitionConfig_EKSProperties_basic(rName string) string { + return fmt.Sprintf(` +resource "aws_batch_job_definition" "test" { + name = %[1]q + type = "container" + eks_properties { + pod_properties { + host_network = true + containers { + image = "public.ecr.aws/amazonlinux/amazonlinux:1" + command = [ + "sleep", + "60" + ] + resources { + limits = { + cpu = "1" + memory = "1024Mi" + } + } + } + metadata { + labels = { + environment = "test" + name = %[1]q + } + } + } + } +}`, rName) +} + +func testAccJobDefinitionConfig_EKSProperties_advancedUpdate(rName string) string { + return fmt.Sprintf(` +resource "aws_batch_job_definition" "test" { + name = %[1]q + type = "container" + eks_properties { + pod_properties { + host_network = true + containers { + args = ["60"] + image = "public.ecr.aws/amazonlinux/amazonlinux:2" + 
image_pull_policy = "Always" + name = "sleep" + command = [ + "sleep", + ] + resources { + requests = { + cpu = "1" + memory = "1024Mi" + } + limits = { + cpu = "1" + memory = "1024Mi" + } + } + security_context { + privileged = true + read_only_root_file_system = true + run_as_group = 1000 + run_as_user = 1000 + run_as_non_root = true + } + volume_mounts { + mount_path = "/tmp" + read_only = true + name = "tmp" + } + env { + name = "Test" + value = "Environment Variable" + } + } + metadata { + labels = { + environment = "test" + name = %[1]q + } + } + volumes { + name = "tmp" + empty_dir { + medium = "Memory" + size_limit = "128Mi" + } + } + service_account_name = "test-service-account" + dns_policy = "ClusterFirst" + } + } + parameters = { + param1 = "val1" + param2 = "val2" + } + + timeout { + attempt_duration_seconds = 60 + } +}`, rName) +} + func testAccJobDefinitionConfig_createTypeContainerWithBothProperties(rName string) string { return fmt.Sprintf(` diff --git a/website/docs/cdktf/python/r/batch_job_definition.html.markdown b/website/docs/cdktf/python/r/batch_job_definition.html.markdown index 0cc17c610e1..563c013da0c 100644 --- a/website/docs/cdktf/python/r/batch_job_definition.html.markdown +++ b/website/docs/cdktf/python/r/batch_job_definition.html.markdown @@ -239,4 +239,4 @@ Using `terraform import`, import Batch Job Definition using the `arn`. For examp % terraform import aws_batch_job_definition.test arn:aws:batch:us-east-1:123456789012:job-definition/sample ``` - \ No newline at end of file + diff --git a/website/docs/cdktf/typescript/r/batch_job_definition.html.markdown b/website/docs/cdktf/typescript/r/batch_job_definition.html.markdown index 9aabae28994..5ff17a21c13 100644 --- a/website/docs/cdktf/typescript/r/batch_job_definition.html.markdown +++ b/website/docs/cdktf/typescript/r/batch_job_definition.html.markdown @@ -271,4 +271,4 @@ Using `terraform import`, import Batch Job Definition using the `arn`. 
For examp % terraform import aws_batch_job_definition.test arn:aws:batch:us-east-1:123456789012:job-definition/sample ``` - \ No newline at end of file + diff --git a/website/docs/r/batch_job_definition.html.markdown b/website/docs/r/batch_job_definition.html.markdown index 4331cf6d38e..f3f5b2e657a 100644 --- a/website/docs/r/batch_job_definition.html.markdown +++ b/website/docs/r/batch_job_definition.html.markdown @@ -102,6 +102,38 @@ resource "aws_batch_job_definition" "test" { } ``` +### Job Definitionn of type EKS + +```terraform +resource "aws_batch_job_definition" "test" { + name = " tf_test_batch_job_definition_eks" + type = "container" + eks_properties { + pod_properties { + host_network = true + containers { + image = "public.ecr.aws/amazonlinux/amazonlinux:1" + command = [ + "sleep", + "60" + ] + resources { + limits = { + cpu = "1" + memory = "1024Mi" + } + } + } + metadata { + labels = { + environment = "test" + } + } + } + } +} +``` + ### Fargate Platform Capability ```terraform @@ -169,9 +201,10 @@ The following arguments are required: The following arguments are optional: * `container_properties` - (Optional) A valid [container properties](http://docs.aws.amazon.com/batch/latest/APIReference/API_RegisterJobDefinition.html) - provided as a single valid JSON document. This parameter is required if the `type` parameter is `container`. + provided as a single valid JSON document. This parameter is only valid if the `type` parameter is `container`. * `node_properties` - (Optional) A valid [node properties](http://docs.aws.amazon.com/batch/latest/APIReference/API_RegisterJobDefinition.html) provided as a single valid JSON document. This parameter is required if the `type` parameter is `multinode`. +* `eks_properties` - (Optional) A valid [eks properties](#eks_properties). This parameter is only valid if the `type` parameter is `container`. * `parameters` - (Optional) Specifies the parameter substitution placeholders to set in the job definition. 
* `platform_capabilities` - (Optional) The platform capabilities required by the job definition. If no value is specified, it defaults to `EC2`. To run the job on Fargate resources, specify `FARGATE`. * `propagate_tags` - (Optional) Specifies whether to propagate the tags from the job definition to the corresponding Amazon ECS task. Default is `false`. @@ -180,6 +213,50 @@ The following arguments are optional: * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `timeout` - (Optional) Specifies the timeout for jobs so that if a job runs longer, AWS Batch terminates the job. Maximum number of `timeout` is `1`. Defined below. +### eks_properties + +* `pod_properties` - The properties for the Kubernetes pod resources of a job. + +### pod_properties + +* `containers` - The properties of the container that's used on the Amazon EKS pod. See [containers](#containers) +* `dns_policy` - (Optional) The DNS policy for the pod. The default value is `ClusterFirst`. If the hostNetwork parameter is not specified, the default is `ClusterFirstWithHostNet`. ClusterFirst indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see Pod's DNS policy in the Kubernetes documentation. +* `host_network` - (Optional) Indicates if the pod uses the hosts' network IP address. The default value is `true`. Setting this to false enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections. +* `metadata` - (Optional) Metadata about the Kubernetes pod. 
+* `service_account_name` - (Optional) The name of the service account that's used to run the pod. +* `volumes` - (Optional) Specifies the volumes for a job definition that uses Amazon EKS resources. AWS Batch supports [emptyDir](#eks_empty_dir), [hostPath](#eks_host_path), and [secret](#eks_secret) volume types. + +### containers + +* `image` - The Docker image used to start the container. +* `args` - An array of arguments to the entrypoint. If this isn't specified, the CMD of the container image is used. This corresponds to the args member in the Entrypoint portion of the Pod in Kubernetes. Environment variable references are expanded using the container's environment. +* `command` - The entrypoint for the container. This isn't run within a shell. If this isn't specified, the ENTRYPOINT of the container image is used. Environment variable references are expanded using the container's environment. +* `env` - The environment variables to pass to a container. See [EKS Environment](#eks_environment) +* `image_pull_policy` - The image pull policy for the container. Supported values are `Always`, `IfNotPresent`, and `Never`. +* `name` - The name of the container. If the name isn't specified, the default name "Default" is used. Each container in a pod must have a unique name. +* `resources` - The type and amount of resources to assign to a container. The supported resources include `memory`, `cpu`, and `nvidia.com/gpu` +* `security_context` - The security context for a job +* `volume_mounts` - The volume mounts for the container. + +### eks_environment + +* `name` - The name of the environment variable. +* `value` - The value of the environment variable. + +### eks_empty_dir + +* `medium` - (Optional) The medium to store the volume. The default value is an empty string, which uses the storage of the node. +* `size_limit` - The maximum size of the volume. By default, there's no maximum size defined. 
+ +### eks_host_path + +* `path` - The path of the file or directory on the host to mount into containers on the pod. + +### eks_secret + +* `secret_name` - The name of the secret. The name must be allowed as a DNS subdomain name. +* `optional` - (Optional) Specifies whether the secret or the secret's keys must be defined. + ### retry_strategy * `attempts` - (Optional) The number of times to move a job to the `RUNNABLE` status. You may specify between `1` and `10` attempts. From 5d2cac673d9f88a59635719ded6de30dfdbebd47 Mon Sep 17 00:00:00 2001 From: Daniel Quackenbush <25692880+danquack@users.noreply.github.com> Date: Thu, 14 Dec 2023 16:03:59 -0500 Subject: [PATCH 225/438] dont remove force new --- internal/service/batch/job_definition.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/service/batch/job_definition.go b/internal/service/batch/job_definition.go index d7386a1a169..a5f115a979c 100644 --- a/internal/service/batch/job_definition.go +++ b/internal/service/batch/job_definition.go @@ -72,6 +72,7 @@ func ResourceJobDefinition() *schema.Resource { "node_properties": { Type: schema.TypeString, Optional: true, + ForceNew: true, ConflictsWith: []string{"eks_properties"}, StateFunc: func(v interface{}) string { json, _ := structure.NormalizeJsonString(v) From 6ca793b90547331e50f124d93e2ec6348036f4f0 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Thu, 14 Dec 2023 16:08:12 -0500 Subject: [PATCH 226/438] r/aws_finspace_kx_volume(test): add tags test (#34928) --- internal/service/finspace/kx_volume_test.go | 101 ++++++++++++++++++++ 1 file changed, 101 insertions(+) diff --git a/internal/service/finspace/kx_volume_test.go b/internal/service/finspace/kx_volume_test.go index fa80b8b039a..9a3ad657dcf 100644 --- a/internal/service/finspace/kx_volume_test.go +++ b/internal/service/finspace/kx_volume_test.go @@ -88,6 +88,62 @@ func TestAccFinSpaceKxVolume_disappears(t *testing.T) { }) } +func TestAccFinSpaceKxVolume_tags(t *testing.T) { + if testing.Short() { 
+ t.Skip("skipping long-running test in short mode") + } + + ctx := acctest.Context(t) + var volume finspace.GetKxVolumeOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_volume.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxVolumeDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxVolumeConfig_tags1(rName, "key1", "value1"), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxVolumeExists(ctx, resourceName, &volume), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccKxVolumeConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxVolumeExists(ctx, resourceName, &volume), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccKxVolumeConfig_tags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxVolumeExists(ctx, resourceName, &volume), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + func testAccCheckKxVolumeDestroy(ctx context.Context) resource.TestCheckFunc 
{ return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).FinSpaceClient(ctx) @@ -264,3 +320,48 @@ resource "aws_finspace_kx_volume" "test" { } `, rName)) } + +func testAccKxVolumeConfig_tags1(rName, key1, value1 string) string { + return acctest.ConfigCompose( + testAccKxVolumeConfigBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_volume" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type = "SSD_250" + size = 1200 + } + + tags = { + %[2]q = %[3]q + } +} +`, rName, key1, value1)) +} + +func testAccKxVolumeConfig_tags2(rName, key1, value1, key2, value2 string) string { + return acctest.ConfigCompose( + testAccKxVolumeConfigBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_volume" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + availability_zones = [aws_finspace_kx_environment.test.availability_zones[0]] + az_mode = "SINGLE" + type = "NAS_1" + nas1_configuration { + type = "SSD_250" + size = 1200 + } + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, key1, value1, key2, value2)) +} From 452272c120f58e0659667a3b6195d47161e00380 Mon Sep 17 00:00:00 2001 From: Daniel Quackenbush <25692880+danquack@users.noreply.github.com> Date: Thu, 14 Dec 2023 16:08:43 -0500 Subject: [PATCH 227/438] remove ImportStateVerifyIgnore of unregistered property --- internal/service/batch/job_definition_test.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/internal/service/batch/job_definition_test.go b/internal/service/batch/job_definition_test.go index 4b27734f041..7b22d54e7a5 100644 --- a/internal/service/batch/job_definition_test.go +++ b/internal/service/batch/job_definition_test.go @@ -593,9 +593,6 @@ func TestAccBatchJobDefinition_EKSProperties_basic(t *testing.T) { ResourceName: resourceName, ImportState: true, 
ImportStateVerify: true, - ImportStateVerifyIgnore: []string{ - "deregister_on_new_revision", - }, }, }, }) From a3a41794e6a48ecc14744ce02e75c5d4c28e8590 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 14 Dec 2023 21:10:26 +0000 Subject: [PATCH 228/438] Update CHANGELOG.md for #34928 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index f41aab55250..9d1842bcdde 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,7 +5,9 @@ FEATURES: * **New Data Source:** `aws_polly_voices` ([#34916](https://github.com/hashicorp/terraform-provider-aws/issues/34916)) * **New Data Source:** `aws_ssoadmin_application_assignments` ([#34796](https://github.com/hashicorp/terraform-provider-aws/issues/34796)) * **New Data Source:** `aws_ssoadmin_principal_application_assignments` ([#34815](https://github.com/hashicorp/terraform-provider-aws/issues/34815)) +* **New Resource:** `aws_finspace_kx_dataview` ([#34828](https://github.com/hashicorp/terraform-provider-aws/issues/34828)) * **New Resource:** `aws_finspace_kx_scaling_group` ([#34832](https://github.com/hashicorp/terraform-provider-aws/issues/34832)) +* **New Resource:** `aws_finspace_kx_volume` ([#34833](https://github.com/hashicorp/terraform-provider-aws/issues/34833)) * **New Resource:** `aws_ssoadmin_trusted_token_issuer` ([#34839](https://github.com/hashicorp/terraform-provider-aws/issues/34839)) ENHANCEMENTS: From 7c8471daf835e52ac6c1c797798ee7329ec6b5fc Mon Sep 17 00:00:00 2001 From: Daniel Quackenbush <25692880+danquack@users.noreply.github.com> Date: Thu, 14 Dec 2023 16:18:35 -0500 Subject: [PATCH 229/438] use conflicts with, remove conflicts tests --- internal/service/batch/job_definition.go | 4 +- internal/service/batch/job_definition_test.go | 91 ------------------- 2 files changed, 2 insertions(+), 93 deletions(-) diff --git a/internal/service/batch/job_definition.go b/internal/service/batch/job_definition.go index a5f115a979c..ab2fce60460 100644 --- 
a/internal/service/batch/job_definition.go +++ b/internal/service/batch/job_definition.go @@ -51,7 +51,7 @@ func ResourceJobDefinition() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, - ConflictsWith: []string{"eks_properties"}, + ConflictsWith: []string{"eks_properties", "node_properties"}, StateFunc: func(v interface{}) string { json, _ := structure.NormalizeJsonString(v) return json @@ -73,7 +73,7 @@ func ResourceJobDefinition() *schema.Resource { Type: schema.TypeString, Optional: true, ForceNew: true, - ConflictsWith: []string{"eks_properties"}, + ConflictsWith: []string{"container_properties", "eks_properties"}, StateFunc: func(v interface{}) string { json, _ := structure.NormalizeJsonString(v) return json diff --git a/internal/service/batch/job_definition_test.go b/internal/service/batch/job_definition_test.go index 7b22d54e7a5..38a7a7acba4 100644 --- a/internal/service/batch/job_definition_test.go +++ b/internal/service/batch/job_definition_test.go @@ -635,24 +635,6 @@ func TestAccBatchJobDefinition_EKSProperties_update(t *testing.T) { }) } -func TestAccBatchJobDefinition_createTypeContainerWithBothProperties(t *testing.T) { - ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccJobDefinitionConfig_createTypeContainerWithBothProperties(rName), - ExpectError: regexache.MustCompile("No `node_properties` can be specified when `type` is \"container\""), - }, - }, - }) -} - func TestAccBatchJobDefinition_createTypeContainerWithNodeProperties(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -671,24 
+653,6 @@ func TestAccBatchJobDefinition_createTypeContainerWithNodeProperties(t *testing. }) } -func TestAccBatchJobDefinition_createTypeMultiNodeWithBothProperties(t *testing.T) { - ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, batch.EndpointsID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckJobDefinitionDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccJobDefinitionConfig_createTypeMultiNodeWithBothProperties(rName), - ExpectError: regexache.MustCompile("No `container_properties` can be specified when `type` is \"multinode\""), - }, - }, - }) -} - func TestAccBatchJobDefinition_createTypeMultiNodeWithContainerProperties(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -1479,61 +1443,6 @@ resource "aws_batch_job_definition" "test" { `, rName) } -func testAccJobDefinitionConfig_createTypeMultiNodeWithBothProperties(rName string) string { - return fmt.Sprintf(` - - -resource "aws_batch_job_definition" "test" { - name = %[1]q - type = "multinode" - parameters = { - param1 = "val1" - param2 = "val2" - } - timeout { - attempt_duration_seconds = 60 - } - - container_properties = jsonencode({ - command = ["echo", "test"] - image = "busybox" - memory = 128 - vcpus = 1 - }) - - node_properties = jsonencode({ - mainNode = 1 - nodeRangeProperties = [ - { - container = { - "command" : ["ls", "-la"], - "image" : "busybox", - "memory" : 512, - "vcpus" : 1 - } - targetNodes = "0:" - }, - { - container = { - command = ["echo", "test"] - environment = [] - image = "busybox" - memory = 128 - mountPoints = [] - ulimits = [] - vcpus = 1 - volumes = [] - } - targetNodes = "1:" - } - ] - numNodes = 4 - }) - -} - `, rName) -} - func 
testAccJobDefinitionConfig_createTypeMultiNodeWithContainerProperties(rName string) string { return fmt.Sprintf(` From 00d21252b63dc3d7bc84b7b86a370a6995ab92a0 Mon Sep 17 00:00:00 2001 From: Daniel Quackenbush <25692880+danquack@users.noreply.github.com> Date: Thu, 14 Dec 2023 16:20:10 -0500 Subject: [PATCH 230/438] CHANGELOG --- .changelog/34931.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/34931.txt diff --git a/.changelog/34931.txt b/.changelog/34931.txt new file mode 100644 index 00000000000..8f65b4185b0 --- /dev/null +++ b/.changelog/34931.txt @@ -0,0 +1,3 @@ +`release-note:enhancement +resource/aws_batch_job_definition: Adds ability to define `eks_properties` +``` From 91060cc76aa5119a4ba2a9f1924845e49a5a0ad3 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 14 Dec 2023 16:39:19 -0500 Subject: [PATCH 231/438] r/aws_lb_target_group: Tidy up Create. --- internal/service/elbv2/const.go | 12 +++-- internal/service/elbv2/target_group.go | 61 ++++++++++++-------------- 2 files changed, 36 insertions(+), 37 deletions(-) diff --git a/internal/service/elbv2/const.go b/internal/service/elbv2/const.go index 111a320a0eb..c771aeccee0 100644 --- a/internal/service/elbv2/const.go +++ b/internal/service/elbv2/const.go @@ -103,10 +103,16 @@ func healthCheckProtocolEnumValues() []string { } } +const ( + protocolVersionGRPC = "GRPC" + protocolVersionHTTP1 = "HTTP1" + protocolVersionHTTP2 = "HTTP2" +) + func protocolVersionEnumValues() []string { return []string{ - "GRPC", - "HTTP1", - "HTTP2", + protocolVersionGRPC, + protocolVersionHTTP1, + protocolVersionHTTP2, } } diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index 48c0151be64..cb5ff6f3c77 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -383,60 +383,57 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta TargetType: aws.String(d.Get("target_type").(string)), } - if 
d.Get("target_type").(string) != elbv2.TargetTypeEnumLambda { + if targetType := d.Get("target_type").(string); targetType != elbv2.TargetTypeEnumLambda { input.Port = aws.Int64(int64(d.Get("port").(int))) - input.Protocol = aws.String(d.Get("protocol").(string)) - switch d.Get("protocol").(string) { + protocol := d.Get("protocol").(string) + input.Protocol = aws.String(protocol) + switch protocol { case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: input.ProtocolVersion = aws.String(d.Get("protocol_version").(string)) } input.VpcId = aws.String(d.Get("vpc_id").(string)) - if d.Get("target_type").(string) == elbv2.TargetTypeEnumIp { - if _, ok := d.GetOk("ip_address_type"); ok { - input.IpAddressType = aws.String(d.Get("ip_address_type").(string)) + if targetType == elbv2.TargetTypeEnumIp { + if v, ok := d.GetOk("ip_address_type"); ok { + input.IpAddressType = aws.String(v.(string)) } } } - if healthChecks := d.Get("health_check").([]interface{}); len(healthChecks) == 1 { - healthCheck := healthChecks[0].(map[string]interface{}) - - input.HealthCheckEnabled = aws.Bool(healthCheck["enabled"].(bool)) + if v, ok := d.GetOk("health_check"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + tfMap := v.([]interface{})[0].(map[string]interface{}) - input.HealthCheckIntervalSeconds = aws.Int64(int64(healthCheck["interval"].(int))) + input.HealthCheckEnabled = aws.Bool(tfMap["enabled"].(bool)) + input.HealthCheckIntervalSeconds = aws.Int64(int64(tfMap["interval"].(int))) + input.HealthyThresholdCount = aws.Int64(int64(tfMap["healthy_threshold"].(int))) + input.UnhealthyThresholdCount = aws.Int64(int64(tfMap["unhealthy_threshold"].(int))) - input.HealthyThresholdCount = aws.Int64(int64(healthCheck["healthy_threshold"].(int))) - input.UnhealthyThresholdCount = aws.Int64(int64(healthCheck["unhealthy_threshold"].(int))) - t := healthCheck["timeout"].(int) - if t != 0 { - input.HealthCheckTimeoutSeconds = aws.Int64(int64(t)) + if v, ok := 
tfMap["timeout"].(int); ok && v != 0 { + input.HealthCheckTimeoutSeconds = aws.Int64(int64(v)) } - healthCheckProtocol := healthCheck["protocol"].(string) - if healthCheckProtocol != elbv2.ProtocolEnumTcp { - p := healthCheck["path"].(string) - if p != "" { - input.HealthCheckPath = aws.String(p) + protocol := tfMap["protocol"].(string) + if protocol != elbv2.ProtocolEnumTcp { + if v, ok := tfMap["path"].(string); ok && v != "" { + input.HealthCheckPath = aws.String(v) } - m := healthCheck["matcher"].(string) - protocolVersion := d.Get("protocol_version").(string) - if m != "" { - if protocolVersion == "GRPC" { + if v, ok := tfMap["matcher"].(string); ok && v != "" { + if protocolVersion := d.Get("protocol_version").(string); protocolVersion == protocolVersionGRPC { input.Matcher = &elbv2.Matcher{ - GrpcCode: aws.String(m), + GrpcCode: aws.String(v), } } else { input.Matcher = &elbv2.Matcher{ - HttpCode: aws.String(m), + HttpCode: aws.String(v), } } } } - if d.Get("target_type").(string) != elbv2.TargetTypeEnumLambda { - input.HealthCheckPort = aws.String(healthCheck["port"].(string)) - input.HealthCheckProtocol = aws.String(healthCheckProtocol) + + if targetType := d.Get("target_type").(string); targetType != elbv2.TargetTypeEnumLambda { + input.HealthCheckPort = aws.String(tfMap["port"].(string)) + input.HealthCheckProtocol = aws.String(protocol) } } @@ -461,10 +458,6 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "creating ELBv2 Target Group (%s): %s", name, err) } - if len(output.TargetGroups) == 0 { - return sdkdiag.AppendErrorf(diags, "creating LB Target Group: no groups returned in response") - } - d.SetId(aws.StringValue(output.TargetGroups[0].TargetGroupArn)) _, err = tfresource.RetryWhenNotFound(ctx, propagationTimeout, func() (interface{}, error) { From 43d401bce33f9cfd0b401659e9af3383a75647c9 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 14 Dec 2023 17:19:26 -0500 Subject: 
[PATCH 232/438] r/aws_lb_target_group: Add attribute constants. --- internal/service/elbv2/const.go | 95 ++++++++++++++++++++++++++ internal/service/elbv2/target_group.go | 53 +++++--------- 2 files changed, 112 insertions(+), 36 deletions(-) diff --git a/internal/service/elbv2/const.go b/internal/service/elbv2/const.go index c771aeccee0..87ca3b2676e 100644 --- a/internal/service/elbv2/const.go +++ b/internal/service/elbv2/const.go @@ -91,6 +91,101 @@ func httpXFFHeaderProcessingMode_Values() []string { } } +// See https://docs.aws.amazon.com/elasticloadbalancing/latest/APIReference/API_TargetGroupAttribute.html#API_TargetGroupAttribute_Contents. +const ( + // The following attributes are supported by all load balancers: + targetGroupAttributeDeregistrationDelayTimeoutSeconds = "deregistration_delay.timeout_seconds" + targetGroupAttributeDeregistrationStickinessEnabled = "stickiness.enabled" + targetGroupAttributeDeregistrationStickinessType = "stickiness.enabled" + + // The following attributes are supported by Application Load Balancers and Network Load Balancers: + targetGroupAttributeLoadBalancingCrossZoneEnabled = "load_balancing.cross_zone.enabled" + targetGroupAttributeTargetGroupHealthDNSFailoverMinimumHealthyTargetsCount = "target_group_health.dns_failover.minimum_healthy_targets.count" + targetGroupAttributeTargetGroupHealthDNSFailoverMinimumHealthyTargetsPercentage = "target_group_health.dns_failover.minimum_healthy_targets.percentage" + targetGroupAttributeTargetGroupHealthUnhealthyStateRoutingMinimumHealthyTargetsCount = "target_group_health.unhealthy_state_routing.minimum_healthy_targets.count" + targetGroupAttributeTargetGroupHealthUnhealthyStateRoutingMinimumHealthyTargetsPercentage = "target_group_health.unhealthy_state_routing.minimum_healthy_targets.percentage" + + // The following attributes are supported only if the load balancer is an Application Load Balancer and the target is an instance or an IP address: + 
targetGroupAttributeLoadBalancingAlgorithmType = "load_balancing.algorithm.type" + targetGroupAttributeLoadBalancingAlgorithmAnomalyMitigation = "load_balancing.algorithm.anomaly_mitigation" + targetGroupAttributeSlowStartDurationSeconds = "slow_start.duration_seconds" + targetGroupAttributeStickinessAppCookieCookieName = "stickiness.app_cookie.cookie_name" + targetGroupAttributeStickinessAppCookieDurationSeconds = "stickiness.app_cookie.duration_seconds" + targetGroupAttributeStickinessLBCookieDurationSeconds = "stickiness.lb_cookie.duration_seconds" + + // The following attribute is supported only if the load balancer is an Application Load Balancer and the target is a Lambda function: + targetGroupAttributeLambdaMultiValueHeadersEnabled = "lambda.multi_value_headers.enabled" + + // The following attributes are supported only by Network Load Balancers: + targetGroupAttributeDeregistrationDelayConnectionTerminationEnabled = "deregistration_delay.connection_termination.enabled" + targetGroupAttributePreserveClientIPEnabled = "preserve_client_ip.enabled" + targetGroupAttributeProxyProtocolV2Enabled = "proxy_protocol_v2.enabled" + targetGroupAttributeTargetHealthStateUnhealthyConnectionTerminationEnabled = "target_health_state.unhealthy.connection_termination.enabled" + + // The following attributes are supported only by Gateway Load Balancers: + targetGroupAttributeTargetFailoverOnDeregistration = "target_failover.on_deregistration" + targetGroupAttributeTargetFailoverOnUnhealthy = "target_failover.on_unhealthy" +) + +const ( + loadBalancingAlgorithmTypeRoundRobin = "round_robin" + loadBalancingAlgorithmTypeLeastOutstandingRequests = "least_outstanding_requests" + loadBalancingAlgorithmTypeWeightedRandom = "weighted_random" +) + +func loadBalancingAlgorithmType_Values() []string { + return []string{ + loadBalancingAlgorithmTypeRoundRobin, + loadBalancingAlgorithmTypeLeastOutstandingRequests, + // TODO + // loadBalancingAlgorithmTypeWeightedRandom, + } +} + +const ( 
+ loadBalancingCrossZoneEnabledTrue = "true" + loadBalancingCrossZoneEnabledFalse = "false" + loadBalancingCrossZoneEnabledUseLoadBalancerConfiguration = "use_load_balancer_configuration" +) + +func loadBalancingCrossZoneEnabled_Values() []string { + return []string{ + loadBalancingCrossZoneEnabledTrue, + loadBalancingCrossZoneEnabledFalse, + loadBalancingCrossZoneEnabledUseLoadBalancerConfiguration, + } +} + +const ( + stickinessTypeLBCookie = "lb_cookie" // Only for ALBs + stickinessTypeAppCookie = "app_cookie" // Only for ALBs + stickinessTypeSourceIP = "source_ip" // Only for NLBs + stickinessTypeSourceIPDestIP = "source_ip_dest_ip" // Only for GWLBs + stickinessTypeSourceIPDestIPProto = "source_ip_dest_ip_proto" // Only for GWLBs +) + +func stickinessType_Values() []string { + return []string{ + stickinessTypeLBCookie, + stickinessTypeAppCookie, + stickinessTypeSourceIP, + stickinessTypeSourceIPDestIP, + stickinessTypeSourceIPDestIPProto, + } +} + +const ( + targetFailoverRebalance = "rebalance" + targetFailoverNoRebalance = "no_rebalance" +) + +func targetFailover_Values() []string { + return []string{ + targetFailoverRebalance, + targetFailoverNoRebalance, + } +} + const ( healthCheckPortTrafficPort = "traffic-port" ) diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index cb5ff6f3c77..df6b14cb5b1 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -156,23 +156,16 @@ func ResourceTargetGroup() *schema.Resource { Default: false, }, "load_balancing_algorithm_type": { - Type: schema.TypeString, - Optional: true, - Computed: true, - ValidateFunc: validation.StringInSlice([]string{ - "round_robin", - "least_outstanding_requests", - }, false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(loadBalancingAlgorithmType_Values(), false), }, "load_balancing_cross_zone_enabled": { - Type: schema.TypeString, - Optional: true, 
- Computed: true, - ValidateFunc: validation.StringInSlice([]string{ - "true", - "false", - "use_load_balancer_configuration", - }, false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(loadBalancingCrossZoneEnabled_Values(), false), }, "name": { Type: schema.TypeString, @@ -276,15 +269,9 @@ func ResourceTargetGroup() *schema.Resource { Default: true, }, "type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - "lb_cookie", // Only for ALBs - "app_cookie", // Only for ALBs - "source_ip", // Only for NLBs - "source_ip_dest_ip", // Only for GWLBs - "source_ip_dest_ip_proto", // Only for GWLBs - }, false), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(stickinessType_Values(), false), }, }, }, @@ -298,20 +285,14 @@ func ResourceTargetGroup() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "on_deregistration": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - "rebalance", - "no_rebalance", - }, false), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(targetFailover_Values(), false), }, "on_unhealthy": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - "rebalance", - "no_rebalance", - }, false), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(targetFailover_Values(), false), }, }, }, From 7f04626a6596e1a9ea8df1bc9675ea2a5bf399ab Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 14 Dec 2023 17:33:51 -0500 Subject: [PATCH 233/438] r/aws_lb_target_group: Tidy up Update. 
--- internal/service/elbv2/target_group.go | 58 +++++++++++++------------- 1 file changed, 28 insertions(+), 30 deletions(-) diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index df6b14cb5b1..8b7c08b0175 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -646,48 +646,46 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) if d.HasChange("health_check") { - var params *elbv2.ModifyTargetGroupInput - healthChecks := d.Get("health_check").([]interface{}) - if len(healthChecks) == 1 { - healthCheck := healthChecks[0].(map[string]interface{}) + if v, ok := d.GetOk("health_check"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + tfMap := v.([]interface{})[0].(map[string]interface{}) - params = &elbv2.ModifyTargetGroupInput{ + input := &elbv2.ModifyTargetGroupInput{ + HealthCheckEnabled: aws.Bool(tfMap["enabled"].(bool)), + HealthCheckIntervalSeconds: aws.Int64(int64(tfMap["interval"].(int))), + HealthyThresholdCount: aws.Int64(int64(tfMap["healthy_threshold"].(int))), TargetGroupArn: aws.String(d.Id()), - HealthCheckEnabled: aws.Bool(healthCheck["enabled"].(bool)), - HealthCheckIntervalSeconds: aws.Int64(int64(healthCheck["interval"].(int))), - HealthyThresholdCount: aws.Int64(int64(healthCheck["healthy_threshold"].(int))), - UnhealthyThresholdCount: aws.Int64(int64(healthCheck["unhealthy_threshold"].(int))), + UnhealthyThresholdCount: aws.Int64(int64(tfMap["unhealthy_threshold"].(int))), } - t := healthCheck["timeout"].(int) - if t != 0 { - params.HealthCheckTimeoutSeconds = aws.Int64(int64(t)) + if v, ok := tfMap["timeout"].(int); ok && v != 0 { + input.HealthCheckTimeoutSeconds = aws.Int64(int64(v)) } - healthCheckProtocol := healthCheck["protocol"].(string) - protocolVersion := d.Get("protocol_version").(string) - if healthCheckProtocol != elbv2.ProtocolEnumTcp && 
!d.IsNewResource() { - if protocolVersion == "GRPC" { - params.Matcher = &elbv2.Matcher{ - GrpcCode: aws.String(healthCheck["matcher"].(string)), - } - } else { - params.Matcher = &elbv2.Matcher{ - HttpCode: aws.String(healthCheck["matcher"].(string)), + protocol := tfMap["protocol"].(string) + if protocol != elbv2.ProtocolEnumTcp { + if v, ok := tfMap["matcher"].(string); ok { + if protocolVersion := d.Get("protocol_version").(string); protocolVersion == protocolVersionGRPC { + input.Matcher = &elbv2.Matcher{ + GrpcCode: aws.String(v), + } + } else { + input.Matcher = &elbv2.Matcher{ + HttpCode: aws.String(v), + } } } - params.HealthCheckPath = aws.String(healthCheck["path"].(string)) + input.HealthCheckPath = aws.String(tfMap["path"].(string)) } - if d.Get("target_type").(string) != elbv2.TargetTypeEnumLambda { - params.HealthCheckPort = aws.String(healthCheck["port"].(string)) - params.HealthCheckProtocol = aws.String(healthCheckProtocol) + + if targetType := d.Get("target_type").(string); targetType != elbv2.TargetTypeEnumLambda { + input.HealthCheckPort = aws.String(tfMap["port"].(string)) + input.HealthCheckProtocol = aws.String(protocol) } - } - if params != nil { - _, err := conn.ModifyTargetGroupWithContext(ctx, params) + _, err := conn.ModifyTargetGroupWithContext(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "modifying Target Group: %s", err) + return sdkdiag.AppendErrorf(diags, "modifying ELBv2 Target Group (%s): %s", d.Id(), err) } } } From 399ec5d623675592ac4924d733c198d4a65f82e3 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Fri, 15 Dec 2023 01:09:37 +0000 Subject: [PATCH 234/438] Update CHANGELOG.md (Manual Trigger) --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9d1842bcdde..e355dbc5b1e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -20,6 +20,8 @@ ENHANCEMENTS: * resource/aws_db_instance: Add support for IBM Db2 databases 
([#34834](https://github.com/hashicorp/terraform-provider-aws/issues/34834)) * resource/aws_dms_endpoint: Add `elasticsearch_settings.use_new_mapping_type` argument ([#29470](https://github.com/hashicorp/terraform-provider-aws/issues/29470)) * resource/aws_dms_endpoint: Add `postgres_settings` configuration block ([#34724](https://github.com/hashicorp/terraform-provider-aws/issues/34724)) +* resource/aws_finspace_kx_cluster: Add `database.dataview_name`, `scaling_group_configuration`, and `tickerplant_log_configuration` arguments. ([#34831](https://github.com/hashicorp/terraform-provider-aws/issues/34831)) +* resource/aws_finspace_kx_cluster: The `capacity_configuration` argument is now optional. ([#34831](https://github.com/hashicorp/terraform-provider-aws/issues/34831)) * resource/aws_lb: Add `connection_logs` configuration block ([#34864](https://github.com/hashicorp/terraform-provider-aws/issues/34864)) * resource/aws_lb: Add plan-time validation that exactly one of either `subnets` or `subnet_mapping` is configured ([#33205](https://github.com/hashicorp/terraform-provider-aws/issues/33205)) * resource/aws_lb: Allow the number of `subnet_mapping`s for Application Load Balancers to be changed without recreating the resource ([#33205](https://github.com/hashicorp/terraform-provider-aws/issues/33205)) From f9bfb2a828f56c4855b6cde1793708f6f29f8f0d Mon Sep 17 00:00:00 2001 From: Daniel Quackenbush <25692880+danquack@users.noreply.github.com> Date: Thu, 14 Dec 2023 19:28:50 -0600 Subject: [PATCH 235/438] Remove additional function --- internal/service/batch/job_definition_test.go | 55 ------------------- 1 file changed, 55 deletions(-) diff --git a/internal/service/batch/job_definition_test.go b/internal/service/batch/job_definition_test.go index 38a7a7acba4..35d348b26f8 100644 --- a/internal/service/batch/job_definition_test.go +++ b/internal/service/batch/job_definition_test.go @@ -1340,61 +1340,6 @@ resource "aws_batch_job_definition" "test" { }`, rName) } -func 
testAccJobDefinitionConfig_createTypeContainerWithBothProperties(rName string) string { - return fmt.Sprintf(` - - -resource "aws_batch_job_definition" "test" { - name = %[1]q - type = "container" - parameters = { - param1 = "val1" - param2 = "val2" - } - timeout { - attempt_duration_seconds = 60 - } - - container_properties = jsonencode({ - command = ["echo", "test"] - image = "busybox" - memory = 128 - vcpus = 1 - }) - - node_properties = jsonencode({ - mainNode = 1 - nodeRangeProperties = [ - { - container = { - "command" : ["ls", "-la"], - "image" : "busybox", - "memory" : 512, - "vcpus" : 1 - } - targetNodes = "0:" - }, - { - container = { - command = ["echo", "test"] - environment = [] - image = "busybox" - memory = 128 - mountPoints = [] - ulimits = [] - vcpus = 1 - volumes = [] - } - targetNodes = "1:" - } - ] - numNodes = 4 - }) - -} - `, rName) -} - func testAccJobDefinitionConfig_createTypeContainerWithNodeProperties(rName string) string { return fmt.Sprintf(` From c4f1b8ce3a339d5545942db7605c9b9259105b6f Mon Sep 17 00:00:00 2001 From: changelogbot Date: Fri, 15 Dec 2023 02:17:52 +0000 Subject: [PATCH 236/438] Update CHANGELOG.md after v5.31.0 --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e355dbc5b1e..8a04d9dd4c9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,5 @@ -## 5.31.0 (Unreleased) +## 5.32.0 (Unreleased) +## 5.31.0 (December 15, 2023) FEATURES: From e444e8566eda3298a40ca30f68c202303630a8a5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 15 Dec 2023 06:01:30 +0000 Subject: [PATCH 237/438] build(deps): bump actions/upload-artifact from 3.1.3 to 4.0.0 Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 3.1.3 to 4.0.0. 
- [Release notes](https://github.com/actions/upload-artifact/releases) - [Commits](https://github.com/actions/upload-artifact/compare/a8a3f3ad30e3422c9c7b888a15615d19a852ae32...c7d193f32edcb7bfad88892161225aeda64e9392) --- updated-dependencies: - dependency-name: actions/upload-artifact dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] --- .github/workflows/firewatch.yml | 2 +- .github/workflows/release.yml | 4 ++-- .github/workflows/snapshot.yml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/firewatch.yml b/.github/workflows/firewatch.yml index b574606f1be..06bf35be99e 100644 --- a/.github/workflows/firewatch.yml +++ b/.github/workflows/firewatch.yml @@ -17,7 +17,7 @@ jobs: slack_token: ${{ secrets.SLACK_BOT_TOKEN }} slack_channel: ${{ secrets.SLACK_CHANNEL }} - name: UploadArtifact - uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 with: name: firewatch path: firewatch.data diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index f296ca84ecb..72a0e6af594 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -17,7 +17,7 @@ jobs: fetch-depth: 0 - name: Generate Release Notes run: sed -n -e "1{/# /d;}" -e "2{/^$/d;}" -e "/# $(git describe --abbrev=0 --exclude="$(git describe --abbrev=0 --match='v*.*.*' --tags)" --match='v*.*.*' --tags | tr -d v)/q;p" CHANGELOG.md > release-notes.txt - - uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 with: name: release-notes path: release-notes.txt @@ -103,7 +103,7 @@ jobs: steps: - name: Save Release Tag run: echo ${{ github.ref_name }} > release-tag.data - - uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + - uses: 
actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 with: name: release-tag path: release-tag.data diff --git a/.github/workflows/snapshot.yml b/.github/workflows/snapshot.yml index de96b368ba4..1e753de2c22 100644 --- a/.github/workflows/snapshot.yml +++ b/.github/workflows/snapshot.yml @@ -36,7 +36,7 @@ jobs: ARTIFACT="${GITHUB_REF}";; esac echo "artifact=$ARTIFACT-$(date -u +'%Y-%m-%dT%H-%M')" >> "$GITHUB_OUTPUT" - - uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + - uses: actions/upload-artifact@c7d193f32edcb7bfad88892161225aeda64e9392 # v4.0.0 with: name: ${{steps.naming.outputs.artifact}} path: dist/*.zip From 7cfb2cc4e906cba10f60047b8ea048a3ded65df2 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 09:00:56 -0500 Subject: [PATCH 238/438] r/aws_lb_target_group: Add 'expandTargetGroupStickinessAttributes'. --- internal/service/elbv2/const.go | 4 +- internal/service/elbv2/target_group.go | 242 +++++++++++-------------- 2 files changed, 110 insertions(+), 136 deletions(-) diff --git a/internal/service/elbv2/const.go b/internal/service/elbv2/const.go index 87ca3b2676e..d4ce150cf1c 100644 --- a/internal/service/elbv2/const.go +++ b/internal/service/elbv2/const.go @@ -95,8 +95,8 @@ func httpXFFHeaderProcessingMode_Values() []string { const ( // The following attributes are supported by all load balancers: targetGroupAttributeDeregistrationDelayTimeoutSeconds = "deregistration_delay.timeout_seconds" - targetGroupAttributeDeregistrationStickinessEnabled = "stickiness.enabled" - targetGroupAttributeDeregistrationStickinessType = "stickiness.enabled" + targetGroupAttributeStickinessEnabled = "stickiness.enabled" + targetGroupAttributeStickinessType = "stickiness.type" // The following attributes are supported by Application Load Balancers and Network Load Balancers: targetGroupAttributeLoadBalancingCrossZoneEnabled = "load_balancing.cross_zone.enabled" diff --git 
a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index 8b7c08b0175..9ee89eb7a04 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -25,6 +25,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/types/nullable" @@ -358,15 +359,16 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta runtimeValidations(d, &diags) + protocol := d.Get("protocol").(string) + targetType := d.Get("target_type").(string) input := &elbv2.CreateTargetGroupInput{ Name: aws.String(name), Tags: getTagsIn(ctx), - TargetType: aws.String(d.Get("target_type").(string)), + TargetType: aws.String(targetType), } - if targetType := d.Get("target_type").(string); targetType != elbv2.TargetTypeEnumLambda { + if targetType != elbv2.TargetTypeEnumLambda { input.Port = aws.Int64(int64(d.Get("port").(int))) - protocol := d.Get("protocol").(string) input.Protocol = aws.String(protocol) switch protocol { case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: @@ -393,8 +395,8 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta input.HealthCheckTimeoutSeconds = aws.Int64(int64(v)) } - protocol := tfMap["protocol"].(string) - if protocol != elbv2.ProtocolEnumTcp { + healthCheckProtocol := tfMap["protocol"].(string) + if healthCheckProtocol != elbv2.ProtocolEnumTcp { if v, ok := tfMap["path"].(string); ok && v != "" { input.HealthCheckPath = aws.String(v) } @@ -412,9 +414,9 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta } } - if targetType := 
d.Get("target_type").(string); targetType != elbv2.TargetTypeEnumLambda { + if targetType != elbv2.TargetTypeEnumLambda { input.HealthCheckPort = aws.String(tfMap["port"].(string)) - input.HealthCheckProtocol = aws.String(protocol) + input.HealthCheckProtocol = aws.String(healthCheckProtocol) } } @@ -449,65 +451,69 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "waiting for ELBv2 Target Group (%s) create: %s", d.Id(), err) } - var attrs []*elbv2.TargetGroupAttribute + var attributes []*elbv2.TargetGroupAttribute - switch d.Get("target_type").(string) { + switch targetType { case elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp: + if v, ok := d.GetOk("stickiness"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + attributes = append(attributes, expandTargetGroupStickinessAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) + } + if v, null, _ := nullable.Int(d.Get("deregistration_delay").(string)).Value(); !null { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("deregistration_delay.timeout_seconds"), Value: aws.String(fmt.Sprintf("%d", v)), }) } if v, ok := d.GetOk("load_balancing_algorithm_type"); ok { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("load_balancing.algorithm.type"), Value: aws.String(v.(string)), }) } if v, ok := d.GetOk("load_balancing_cross_zone_enabled"); ok { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("load_balancing.cross_zone.enabled"), Value: aws.String(v.(string)), }) } if v, ok := d.GetOk("preserve_client_ip"); ok { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: 
aws.String("preserve_client_ip.enabled"), Value: aws.String(v.(string)), }) } if v, ok := d.GetOk("proxy_protocol_v2"); ok { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("proxy_protocol_v2.enabled"), Value: aws.String(strconv.FormatBool(v.(bool))), }) } if v, ok := d.GetOk("connection_termination"); ok { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("deregistration_delay.connection_termination.enabled"), Value: aws.String(strconv.FormatBool(v.(bool))), }) } if v, ok := d.GetOk("slow_start"); ok { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("slow_start.duration_seconds"), Value: aws.String(fmt.Sprintf("%d", v.(int))), }) } // Only supported for GWLB - if v, ok := d.Get("protocol").(string); ok && v == elbv2.ProtocolEnumGeneve { + if protocol == elbv2.ProtocolEnumGeneve { if v, ok := d.GetOk("target_failover"); ok { failoverBlock := v.([]interface{}) failover := failoverBlock[0].(map[string]interface{}) - attrs = append(attrs, + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("target_failover.on_deregistration"), Value: aws.String(failover["on_deregistration"].(string)), @@ -526,7 +532,7 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta if v, ok := d.GetOk("target_health_state"); ok && len(v.([]interface{})) > 0 { targetHealthStateBlock := v.([]interface{}) targetHealthState := targetHealthStateBlock[0].(map[string]interface{}) - attrs = append(attrs, + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("target_health_state.unhealthy.connection_termination.enabled"), Value: aws.String(strconv.FormatBool(targetHealthState["enable_unhealthy_connection_termination"].(bool))), @@ -535,64 +541,25 @@ func 
resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta } } } - - if v, ok := d.GetOk("stickiness"); ok && len(v.([]interface{})) > 0 { - stickinessBlocks := v.([]interface{}) - stickiness := stickinessBlocks[0].(map[string]interface{}) - - attrs = append(attrs, - &elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.enabled"), - Value: aws.String(strconv.FormatBool(stickiness["enabled"].(bool))), - }, - &elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.type"), - Value: aws.String(stickiness["type"].(string)), - }) - - switch d.Get("protocol").(string) { - case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: - switch stickiness["type"].(string) { - case "lb_cookie": - attrs = append(attrs, - &elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.lb_cookie.duration_seconds"), - Value: aws.String(fmt.Sprintf("%d", stickiness["cookie_duration"].(int))), - }) - case "app_cookie": - attrs = append(attrs, - &elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.app_cookie.duration_seconds"), - Value: aws.String(fmt.Sprintf("%d", stickiness["cookie_duration"].(int))), - }, - &elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.app_cookie.cookie_name"), - Value: aws.String(stickiness["cookie_name"].(string)), - }) - default: - log.Printf("[WARN] Unexpected stickiness type. 
Expected lb_cookie or app_cookie, got %s", stickiness["type"].(string)) - } - } - } case elbv2.TargetTypeEnumLambda: if v, ok := d.GetOk("lambda_multi_value_headers_enabled"); ok { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("lambda.multi_value_headers.enabled"), Value: aws.String(strconv.FormatBool(v.(bool))), }) } } - if len(attrs) > 0 { - params := &elbv2.ModifyTargetGroupAttributesInput{ + if len(attributes) > 0 { + input := &elbv2.ModifyTargetGroupAttributesInput{ + Attributes: attributes, TargetGroupArn: aws.String(d.Id()), - Attributes: attrs, } - _, err := conn.ModifyTargetGroupAttributesWithContext(ctx, params) + _, err := conn.ModifyTargetGroupAttributesWithContext(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "modifying Target Group Attributes: %s", err) + return sdkdiag.AppendErrorf(diags, "modifying ELBv2 Target Group (%s) attributes: %s", d.Id(), err) } } @@ -645,6 +612,9 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + protocol := d.Get("protocol").(string) + targetType := d.Get("target_type").(string) + if d.HasChange("health_check") { if v, ok := d.GetOk("health_check"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { tfMap := v.([]interface{})[0].(map[string]interface{}) @@ -661,8 +631,8 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta input.HealthCheckTimeoutSeconds = aws.Int64(int64(v)) } - protocol := tfMap["protocol"].(string) - if protocol != elbv2.ProtocolEnumTcp { + healthCheckProtocol := tfMap["protocol"].(string) + if healthCheckProtocol != elbv2.ProtocolEnumTcp { if v, ok := tfMap["matcher"].(string); ok { if protocolVersion := d.Get("protocol_version").(string); protocolVersion == protocolVersionGRPC { input.Matcher = &elbv2.Matcher{ @@ -677,9 +647,9 @@ func 
resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta input.HealthCheckPath = aws.String(tfMap["path"].(string)) } - if targetType := d.Get("target_type").(string); targetType != elbv2.TargetTypeEnumLambda { + if targetType != elbv2.TargetTypeEnumLambda { input.HealthCheckPort = aws.String(tfMap["port"].(string)) - input.HealthCheckProtocol = aws.String(protocol) + input.HealthCheckProtocol = aws.String(healthCheckProtocol) } _, err := conn.ModifyTargetGroupWithContext(ctx, input) @@ -690,13 +660,24 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta } } - var attrs []*elbv2.TargetGroupAttribute + var attributes []*elbv2.TargetGroupAttribute - switch d.Get("target_type").(string) { + switch targetType { case elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp: + if d.HasChange("stickiness") { + if v, ok := d.GetOk("stickiness"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + attributes = append(attributes, expandTargetGroupStickinessAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) 
+ } else { + attributes = append(attributes, &elbv2.TargetGroupAttribute{ + Key: aws.String(targetGroupAttributeStickinessEnabled), + Value: flex.BoolValueToString(false), + }) + } + } + if d.HasChange("deregistration_delay") { if v, null, _ := nullable.Int(d.Get("deregistration_delay").(string)).Value(); !null { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("deregistration_delay.timeout_seconds"), Value: aws.String(fmt.Sprintf("%d", v)), }) @@ -704,91 +685,42 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta } if d.HasChange("slow_start") { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("slow_start.duration_seconds"), Value: aws.String(fmt.Sprintf("%d", d.Get("slow_start").(int))), }) } if d.HasChange("proxy_protocol_v2") { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("proxy_protocol_v2.enabled"), Value: aws.String(strconv.FormatBool(d.Get("proxy_protocol_v2").(bool))), }) } if d.HasChange("connection_termination") { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("deregistration_delay.connection_termination.enabled"), Value: aws.String(strconv.FormatBool(d.Get("connection_termination").(bool))), }) } if d.HasChange("preserve_client_ip") { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("preserve_client_ip.enabled"), Value: aws.String(d.Get("preserve_client_ip").(string)), }) } - if d.HasChange("stickiness") { - stickinessBlocks := d.Get("stickiness").([]interface{}) - if len(stickinessBlocks) == 1 { - stickiness := stickinessBlocks[0].(map[string]interface{}) - attrs = append(attrs, - 
&elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.enabled"), - Value: aws.String(strconv.FormatBool(stickiness["enabled"].(bool))), - }, - &elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.type"), - Value: aws.String(stickiness["type"].(string)), - }) - - switch d.Get("protocol").(string) { - case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: - switch stickiness["type"].(string) { - case "lb_cookie": - attrs = append(attrs, - &elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.lb_cookie.duration_seconds"), - Value: aws.String(fmt.Sprintf("%d", stickiness["cookie_duration"].(int))), - }, - &elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.app_cookie.cookie_name"), - Value: aws.String(stickiness["cookie_name"].(string)), - }) - case "app_cookie": - attrs = append(attrs, - &elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.app_cookie.duration_seconds"), - Value: aws.String(fmt.Sprintf("%d", stickiness["cookie_duration"].(int))), - }, - &elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.app_cookie.cookie_name"), - Value: aws.String(stickiness["cookie_name"].(string)), - }) - default: - log.Printf("[WARN] Unexpected stickiness type. 
Expected lb_cookie or app_cookie, got %s", stickiness["type"].(string)) - } - } - } else if len(stickinessBlocks) == 0 { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ - Key: aws.String("stickiness.enabled"), - Value: aws.String("false"), - }) - } - } - if d.HasChange("load_balancing_algorithm_type") { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("load_balancing.algorithm.type"), Value: aws.String(d.Get("load_balancing_algorithm_type").(string)), }) } if d.HasChange("load_balancing_cross_zone_enabled") { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("load_balancing.cross_zone.enabled"), Value: aws.String(d.Get("load_balancing_cross_zone_enabled").(string)), }) @@ -798,7 +730,7 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta targetHealthStateBlock := d.Get("target_health_state").([]interface{}) if len(targetHealthStateBlock) == 1 { targetHealthState := targetHealthStateBlock[0].(map[string]interface{}) - attrs = append(attrs, + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("target_health_state.unhealthy.connection_termination.enabled"), Value: aws.String(strconv.FormatBool(targetHealthState["enable_unhealthy_connection_termination"].(bool))), @@ -810,7 +742,7 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta failoverBlock := d.Get("target_failover").([]interface{}) if len(failoverBlock) == 1 { failover := failoverBlock[0].(map[string]interface{}) - attrs = append(attrs, + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("target_failover.on_deregistration"), Value: aws.String(failover["on_deregistration"].(string)), @@ -825,22 +757,23 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta case elbv2.TargetTypeEnumLambda: if 
d.HasChange("lambda_multi_value_headers_enabled") { - attrs = append(attrs, &elbv2.TargetGroupAttribute{ + attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("lambda.multi_value_headers.enabled"), Value: aws.String(strconv.FormatBool(d.Get("lambda_multi_value_headers_enabled").(bool))), }) } } - if len(attrs) > 0 { - params := &elbv2.ModifyTargetGroupAttributesInput{ + if len(attributes) > 0 { + input := &elbv2.ModifyTargetGroupAttributesInput{ + Attributes: attributes, TargetGroupArn: aws.String(d.Id()), - Attributes: attrs, } - _, err := conn.ModifyTargetGroupAttributesWithContext(ctx, params) + _, err := conn.ModifyTargetGroupAttributesWithContext(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "modifying Target Group Attributes: %s", err) + return sdkdiag.AppendErrorf(diags, "modifying ELBv2 Target Group (%s) attributes: %s", d.Id(), err) } } @@ -1003,6 +936,47 @@ func TargetGroupSuffixFromARN(arn *string) string { return "" } +func expandTargetGroupStickinessAttributes(tfMap map[string]interface{}, protocol string) []*elbv2.TargetGroupAttribute { + if tfMap == nil { + return nil + } + + apiObjects := []*elbv2.TargetGroupAttribute{ + { + Key: aws.String(targetGroupAttributeStickinessEnabled), + Value: flex.BoolValueToString(tfMap["enabled"].(bool)), + }, + { + Key: aws.String(targetGroupAttributeStickinessType), + Value: aws.String(tfMap["type"].(string)), + }, + } + + switch protocol { + case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: + switch stickinessType := tfMap["type"].(string); stickinessType { + case stickinessTypeLBCookie: + apiObjects = append(apiObjects, + &elbv2.TargetGroupAttribute{ + Key: aws.String(targetGroupAttributeStickinessLBCookieDurationSeconds), + Value: flex.IntValueToString(tfMap["cookie_duration"].(int)), + }) + case stickinessTypeAppCookie: + apiObjects = append(apiObjects, + &elbv2.TargetGroupAttribute{ + Key: aws.String(targetGroupAttributeStickinessAppCookieCookieName), + Value: 
aws.String(tfMap["cookie_name"].(string)), + }, + &elbv2.TargetGroupAttribute{ + Key: aws.String(targetGroupAttributeStickinessAppCookieDurationSeconds), + Value: flex.IntValueToString(tfMap["cookie_duration"].(int)), + }) + } + } + + return apiObjects +} + // flattenTargetGroupResource takes a *elbv2.TargetGroup and populates all respective resource fields. func flattenTargetGroupResource(ctx context.Context, d *schema.ResourceData, meta interface{}, targetGroup *elbv2.TargetGroup) error { conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) From e23d2f2bee92cf3520c9490b87590ef0e20ce77f Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 10:24:27 -0500 Subject: [PATCH 239/438] r/aws_lb_target_group: Tidy up Read. --- internal/service/elbv2/target_group.go | 242 +++++++++++++------------ 1 file changed, 127 insertions(+), 115 deletions(-) diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index 9ee89eb7a04..2a95c61a3a8 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -584,9 +584,7 @@ func resourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta i var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) - outputRaw, err := tfresource.RetryWhenNewResourceNotFound(ctx, propagationTimeout, func() (interface{}, error) { - return FindTargetGroupByARN(ctx, conn, d.Id()) - }, d.IsNewResource()) + targetGroup, err := FindTargetGroupByARN(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] ELBv2 Target Group %s not found, removing from state", d.Id()) @@ -602,9 +600,108 @@ func resourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta i runtimeValidations(d, &diags) } - if err := flattenTargetGroupResource(ctx, d, meta, outputRaw.(*elbv2.TargetGroup)); err != nil { - return sdkdiag.AppendFromErr(diags, err) + targetType := aws.StringValue(targetGroup.TargetType) + + d.Set("arn", 
targetGroup.TargetGroupArn) + d.Set("arn_suffix", TargetGroupSuffixFromARN(targetGroup.TargetGroupArn)) + d.Set("ip_address_type", targetGroup.IpAddressType) + d.Set("name", targetGroup.TargetGroupName) + d.Set("name_prefix", create.NamePrefixFromName(aws.StringValue(targetGroup.TargetGroupName))) + d.Set("target_type", targetType) + + if err := d.Set("health_check", flattenLbTargetGroupHealthCheck(targetGroup)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting health_check: %s", err) + } + + if _, ok := d.GetOk("port"); targetGroup.Port != nil || ok { + d.Set("port", targetGroup.Port) + } + if _, ok := d.GetOk("protocol"); targetGroup.Protocol != nil || ok { + d.Set("protocol", targetGroup.Protocol) + } + if _, ok := d.GetOk("protocol_version"); targetGroup.ProtocolVersion != nil || ok { + d.Set("protocol_version", targetGroup.ProtocolVersion) + } + if _, ok := d.GetOk("vpc_id"); targetGroup.VpcId != nil || ok { + d.Set("vpc_id", targetGroup.VpcId) + } + + attributes, err := findTargetGroupAttributesByARN(ctx, conn, d.Id()) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading ELBv2 Target Group (%s) attributes: %s", d.Id(), err) + } + + for _, attr := range attributes { + switch aws.StringValue(attr.Key) { + case "deregistration_delay.timeout_seconds": + d.Set("deregistration_delay", attr.Value) + case "lambda.multi_value_headers.enabled": + enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "converting lambda.multi_value_headers.enabled to bool: %s", aws.StringValue(attr.Value)) + } + d.Set("lambda_multi_value_headers_enabled", enabled) + case "proxy_protocol_v2.enabled": + enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "converting proxy_protocol_v2.enabled to bool: %s", aws.StringValue(attr.Value)) + } + d.Set("proxy_protocol_v2", enabled) + case "deregistration_delay.connection_termination.enabled": + 
enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "converting deregistration_delay.connection_termination.enabled to bool: %s", aws.StringValue(attr.Value)) + } + d.Set("connection_termination", enabled) + case "slow_start.duration_seconds": + slowStart, err := strconv.Atoi(aws.StringValue(attr.Value)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "converting slow_start.duration_seconds to int: %s", aws.StringValue(attr.Value)) + } + d.Set("slow_start", slowStart) + case "load_balancing.algorithm.type": + loadBalancingAlgorithm := aws.StringValue(attr.Value) + d.Set("load_balancing_algorithm_type", loadBalancingAlgorithm) + case "load_balancing.cross_zone.enabled": + loadBalancingCrossZoneEnabled := aws.StringValue(attr.Value) + d.Set("load_balancing_cross_zone_enabled", loadBalancingCrossZoneEnabled) + case "preserve_client_ip.enabled": + _, err := strconv.ParseBool(aws.StringValue(attr.Value)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "converting preserve_client_ip.enabled to bool: %s", aws.StringValue(attr.Value)) + } + d.Set("preserve_client_ip", attr.Value) + } + } + + stickinessAttr, err := flattenTargetGroupStickiness(attributes) + if err != nil { + return sdkdiag.AppendErrorf(diags, "flattening stickiness: %s", err) + } + + if err := d.Set("stickiness", stickinessAttr); err != nil { + return sdkdiag.AppendErrorf(diags, "setting stickiness: %s", err) + } + + targetHealthStateAttr, err := flattenTargetHealthState(attributes) + if err != nil { + return sdkdiag.AppendErrorf(diags, "flattening target health state: %s", err) + } + if err := d.Set("target_health_state", targetHealthStateAttr); err != nil { + return sdkdiag.AppendErrorf(diags, "setting target health state: %s", err) + } + + // Set target failover attributes for GWLB + targetFailoverAttr := flattenTargetGroupFailover(attributes) + if err != nil { + return sdkdiag.AppendErrorf(diags, "flattening target failover: 
%s", err) + } + + if err := d.Set("target_failover", targetFailoverAttr); err != nil { + return sdkdiag.AppendErrorf(diags, "setting target failover: %s", err) } + return diags } @@ -881,6 +978,31 @@ func findTargetGroups(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.Descr return output, nil } +func findTargetGroupAttributesByARN(ctx context.Context, conn *elbv2.ELBV2, arn string) ([]*elbv2.TargetGroupAttribute, error) { + input := &elbv2.DescribeTargetGroupAttributesInput{ + TargetGroupArn: aws.String(arn), + } + + output, err := conn.DescribeTargetGroupAttributesWithContext(ctx, input) + + if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeTargetGroupNotFoundException) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.Attributes, nil +} + func validTargetGroupHealthCheckPath(v interface{}, k string) (ws []string, errors []error) { value := v.(string) if !strings.HasPrefix(value, "/") { @@ -977,116 +1099,6 @@ func expandTargetGroupStickinessAttributes(tfMap map[string]interface{}, protoco return apiObjects } -// flattenTargetGroupResource takes a *elbv2.TargetGroup and populates all respective resource fields. 
-func flattenTargetGroupResource(ctx context.Context, d *schema.ResourceData, meta interface{}, targetGroup *elbv2.TargetGroup) error { - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) - - targetType := aws.StringValue(targetGroup.TargetType) - - d.Set("arn", targetGroup.TargetGroupArn) - d.Set("arn_suffix", TargetGroupSuffixFromARN(targetGroup.TargetGroupArn)) - d.Set("ip_address_type", targetGroup.IpAddressType) - d.Set("name", targetGroup.TargetGroupName) - d.Set("name_prefix", create.NamePrefixFromName(aws.StringValue(targetGroup.TargetGroupName))) - d.Set("target_type", targetType) - - if err := d.Set("health_check", flattenLbTargetGroupHealthCheck(targetGroup)); err != nil { - return fmt.Errorf("setting health_check: %w", err) - } - - if _, ok := d.GetOk("port"); targetGroup.Port != nil || ok { - d.Set("port", targetGroup.Port) - } - if _, ok := d.GetOk("protocol"); targetGroup.Protocol != nil || ok { - d.Set("protocol", targetGroup.Protocol) - } - if _, ok := d.GetOk("protocol_version"); targetGroup.ProtocolVersion != nil || ok { - d.Set("protocol_version", targetGroup.ProtocolVersion) - } - if _, ok := d.GetOk("vpc_id"); targetGroup.VpcId != nil || ok { - d.Set("vpc_id", targetGroup.VpcId) - } - - attrResp, err := conn.DescribeTargetGroupAttributesWithContext(ctx, &elbv2.DescribeTargetGroupAttributesInput{ - TargetGroupArn: aws.String(d.Id()), - }) - if err != nil { - return fmt.Errorf("retrieving Target Group Attributes: %w", err) - } - - for _, attr := range attrResp.Attributes { - switch aws.StringValue(attr.Key) { - case "deregistration_delay.timeout_seconds": - d.Set("deregistration_delay", attr.Value) - case "lambda.multi_value_headers.enabled": - enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return fmt.Errorf("converting lambda.multi_value_headers.enabled to bool: %s", aws.StringValue(attr.Value)) - } - d.Set("lambda_multi_value_headers_enabled", enabled) - case "proxy_protocol_v2.enabled": - enabled, err := 
strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return fmt.Errorf("converting proxy_protocol_v2.enabled to bool: %s", aws.StringValue(attr.Value)) - } - d.Set("proxy_protocol_v2", enabled) - case "deregistration_delay.connection_termination.enabled": - enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return fmt.Errorf("converting deregistration_delay.connection_termination.enabled to bool: %s", aws.StringValue(attr.Value)) - } - d.Set("connection_termination", enabled) - case "slow_start.duration_seconds": - slowStart, err := strconv.Atoi(aws.StringValue(attr.Value)) - if err != nil { - return fmt.Errorf("converting slow_start.duration_seconds to int: %s", aws.StringValue(attr.Value)) - } - d.Set("slow_start", slowStart) - case "load_balancing.algorithm.type": - loadBalancingAlgorithm := aws.StringValue(attr.Value) - d.Set("load_balancing_algorithm_type", loadBalancingAlgorithm) - case "load_balancing.cross_zone.enabled": - loadBalancingCrossZoneEnabled := aws.StringValue(attr.Value) - d.Set("load_balancing_cross_zone_enabled", loadBalancingCrossZoneEnabled) - case "preserve_client_ip.enabled": - _, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return fmt.Errorf("converting preserve_client_ip.enabled to bool: %s", aws.StringValue(attr.Value)) - } - d.Set("preserve_client_ip", attr.Value) - } - } - - stickinessAttr, err := flattenTargetGroupStickiness(attrResp.Attributes) - if err != nil { - return fmt.Errorf("flattening stickiness: %w", err) - } - - if err := d.Set("stickiness", stickinessAttr); err != nil { - return fmt.Errorf("setting stickiness: %w", err) - } - - targetHealthStateAttr, err := flattenTargetHealthState(attrResp.Attributes) - if err != nil { - return fmt.Errorf("flattening target health state: %w", err) - } - if err := d.Set("target_health_state", targetHealthStateAttr); err != nil { - return fmt.Errorf("setting target health state: %w", err) - } - - // Set target 
failover attributes for GWLB - targetFailoverAttr := flattenTargetGroupFailover(attrResp.Attributes) - if err != nil { - return fmt.Errorf("flattening target failover: %w", err) - } - - if err := d.Set("target_failover", targetFailoverAttr); err != nil { - return fmt.Errorf("setting target failover: %w", err) - } - - return nil -} - func flattenTargetHealthState(attributes []*elbv2.TargetGroupAttribute) ([]interface{}, error) { if len(attributes) == 0 { return []interface{}{}, nil From 5f091f8df7d7159083f4536158270c42361068bf Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Fri, 15 Dec 2023 09:33:26 -0600 Subject: [PATCH 240/438] chore: tweak doc fmt --- website/docs/d/ecr_image.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/d/ecr_image.html.markdown b/website/docs/d/ecr_image.html.markdown index 92886386cce..398af3f40cd 100644 --- a/website/docs/d/ecr_image.html.markdown +++ b/website/docs/d/ecr_image.html.markdown @@ -3,7 +3,7 @@ subcategory: "ECR (Elastic Container Registry)" layout: "aws" page_title: "AWS: aws_ecr_image" description: |- - Provides details about an ECR Image + Provides details about an ECR Image --- # Data Source: aws_ecr_image From c3513572b1a02bc5899bf3e100f31e115420251a Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 10:50:08 -0500 Subject: [PATCH 241/438] r/aws_lb_target_group: Tidy up 'flattenTargetGroupStickinessAttributes'. 
--- internal/service/elbv2/target_group.go | 91 +++++++++---------- .../service/elbv2/target_group_data_source.go | 24 ++--- 2 files changed, 51 insertions(+), 64 deletions(-) diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index 2a95c61a3a8..a0a8d0ce229 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -600,13 +600,12 @@ func resourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta i runtimeValidations(d, &diags) } - targetType := aws.StringValue(targetGroup.TargetType) - d.Set("arn", targetGroup.TargetGroupArn) d.Set("arn_suffix", TargetGroupSuffixFromARN(targetGroup.TargetGroupArn)) d.Set("ip_address_type", targetGroup.IpAddressType) d.Set("name", targetGroup.TargetGroupName) d.Set("name_prefix", create.NamePrefixFromName(aws.StringValue(targetGroup.TargetGroupName))) + targetType := aws.StringValue(targetGroup.TargetType) d.Set("target_type", targetType) if err := d.Set("health_check", flattenLbTargetGroupHealthCheck(targetGroup)); err != nil { @@ -616,8 +615,10 @@ func resourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta i if _, ok := d.GetOk("port"); targetGroup.Port != nil || ok { d.Set("port", targetGroup.Port) } + var protocol string if _, ok := d.GetOk("protocol"); targetGroup.Protocol != nil || ok { - d.Set("protocol", targetGroup.Protocol) + protocol = aws.StringValue(targetGroup.Protocol) + d.Set("protocol", protocol) } if _, ok := d.GetOk("protocol_version"); targetGroup.ProtocolVersion != nil || ok { d.Set("protocol_version", targetGroup.ProtocolVersion) @@ -675,12 +676,7 @@ func resourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta i } } - stickinessAttr, err := flattenTargetGroupStickiness(attributes) - if err != nil { - return sdkdiag.AppendErrorf(diags, "flattening stickiness: %s", err) - } - - if err := d.Set("stickiness", stickinessAttr); err != nil { + if err := d.Set("stickiness", 
[]interface{}{flattenTargetGroupStickinessAttributes(attributes, protocol)}); err != nil { return sdkdiag.AppendErrorf(diags, "setting stickiness: %s", err) } @@ -1099,6 +1095,42 @@ func expandTargetGroupStickinessAttributes(tfMap map[string]interface{}, protoco return apiObjects } +func flattenTargetGroupStickinessAttributes(apiObjects []*elbv2.TargetGroupAttribute, protocol string) map[string]interface{} { + if len(apiObjects) == 0 { + return nil + } + + tfMap := map[string]interface{}{} + + var stickinessType string + for _, apiObject := range apiObjects { + switch k, v := aws.StringValue(apiObject.Key), apiObject.Value; k { + case targetGroupAttributeStickinessEnabled: + tfMap["enabled"] = flex.StringToBoolValue(v) + case targetGroupAttributeStickinessType: + stickinessType = aws.StringValue(v) + tfMap["type"] = stickinessType + } + } + + switch protocol { + case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: + for _, apiObject := range apiObjects { + k, v := aws.StringValue(apiObject.Key), apiObject.Value + switch { + case k == targetGroupAttributeStickinessLBCookieDurationSeconds && stickinessType == stickinessTypeLBCookie: + tfMap["cookie_duration"] = flex.StringToIntValue(v) + case k == targetGroupAttributeStickinessAppCookieCookieName && stickinessType == stickinessTypeAppCookie: + tfMap["cookie_name"] = aws.StringValue(v) + case k == targetGroupAttributeStickinessAppCookieDurationSeconds && stickinessType == stickinessTypeAppCookie: + tfMap["cookie_duration"] = flex.StringToIntValue(v) + } + } + } + + return tfMap +} + func flattenTargetHealthState(attributes []*elbv2.TargetGroupAttribute) ([]interface{}, error) { if len(attributes) == 0 { return []interface{}{}, nil @@ -1139,47 +1171,6 @@ func flattenTargetGroupFailover(attributes []*elbv2.TargetGroupAttribute) []inte return []interface{}{m} } -func flattenTargetGroupStickiness(attributes []*elbv2.TargetGroupAttribute) ([]interface{}, error) { - if len(attributes) == 0 { - return []interface{}{}, nil - 
} - - m := make(map[string]interface{}) - - for _, attr := range attributes { - switch aws.StringValue(attr.Key) { - case "stickiness.enabled": - enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return nil, fmt.Errorf("converting stickiness.enabled to bool: %s", aws.StringValue(attr.Value)) - } - m["enabled"] = enabled - case "stickiness.type": - m["type"] = aws.StringValue(attr.Value) - case "stickiness.lb_cookie.duration_seconds": - if sType, ok := m["type"].(string); !ok || sType == "lb_cookie" { - duration, err := strconv.Atoi(aws.StringValue(attr.Value)) - if err != nil { - return nil, fmt.Errorf("converting stickiness.lb_cookie.duration_seconds to int: %s", aws.StringValue(attr.Value)) - } - m["cookie_duration"] = duration - } - case "stickiness.app_cookie.cookie_name": - m["cookie_name"] = aws.StringValue(attr.Value) - case "stickiness.app_cookie.duration_seconds": - if sType, ok := m["type"].(string); !ok || sType == "app_cookie" { - duration, err := strconv.Atoi(aws.StringValue(attr.Value)) - if err != nil { - return nil, fmt.Errorf("converting stickiness.app_cookie.duration_seconds to int: %s", aws.StringValue(attr.Value)) - } - m["cookie_duration"] = duration - } - } - } - - return []interface{}{m}, nil -} - func resourceTargetGroupCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta any) error { healthCheck := make(map[string]any) if healthChecks := diff.Get("health_check").([]interface{}); len(healthChecks) == 1 { diff --git a/internal/service/elbv2/target_group_data_source.go b/internal/service/elbv2/target_group_data_source.go index 1dd94c9293c..85b4dd1b326 100644 --- a/internal/service/elbv2/target_group_data_source.go +++ b/internal/service/elbv2/target_group_data_source.go @@ -232,24 +232,25 @@ func dataSourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta return sdkdiag.AppendErrorf(diags, "setting health_check: %s", err) } + var protocol string if v, _ := 
d.Get("target_type").(string); v != elbv2.TargetTypeEnumLambda { - d.Set("vpc_id", targetGroup.VpcId) d.Set("port", targetGroup.Port) - d.Set("protocol", targetGroup.Protocol) + protocol = aws.StringValue(targetGroup.Protocol) + d.Set("protocol", protocol) + d.Set("vpc_id", targetGroup.VpcId) } - switch d.Get("protocol").(string) { + switch protocol { case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: d.Set("protocol_version", targetGroup.ProtocolVersion) } - attrResp, err := conn.DescribeTargetGroupAttributesWithContext(ctx, &elbv2.DescribeTargetGroupAttributesInput{ - TargetGroupArn: aws.String(d.Id()), - }) + attributes, err := findTargetGroupAttributesByARN(ctx, conn, d.Id()) + if err != nil { - return sdkdiag.AppendErrorf(diags, "retrieving Target Group Attributes: %s", err) + return sdkdiag.AppendErrorf(diags, "reading ELBv2 Target Group (%s) attributes: %s", d.Id(), err) } - for _, attr := range attrResp.Attributes { + for _, attr := range attributes { switch aws.StringValue(attr.Key) { case "deregistration_delay.connection_termination.enabled": enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) @@ -296,12 +297,7 @@ func dataSourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta } } - stickinessAttr, err := flattenTargetGroupStickiness(attrResp.Attributes) - if err != nil { - return sdkdiag.AppendErrorf(diags, "flattening stickiness: %s", err) - } - - if err := d.Set("stickiness", stickinessAttr); err != nil { + if err := d.Set("stickiness", []interface{}{flattenTargetGroupStickinessAttributes(attributes, protocol)}); err != nil { return sdkdiag.AppendErrorf(diags, "setting stickiness: %s", err) } From 3d890581eeb43ef5a42ee97e1a8ff2a1fa6ec780 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 10:57:48 -0500 Subject: [PATCH 242/438] Fixup 'TestAccELBV2TargetGroup_Stickiness_updateStickinessType'. 
--- internal/service/elbv2/target_group_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/service/elbv2/target_group_test.go b/internal/service/elbv2/target_group_test.go index 5a5f81c053a..602b7188380 100644 --- a/internal/service/elbv2/target_group_test.go +++ b/internal/service/elbv2/target_group_test.go @@ -1545,6 +1545,7 @@ func TestAccELBV2TargetGroup_Stickiness_updateStickinessType(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "stickiness.#", "1"), resource.TestCheckResourceAttr(resourceName, "stickiness.0.enabled", "true"), resource.TestCheckResourceAttr(resourceName, "stickiness.0.type", "lb_cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_name", ""), resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_duration", "10000"), resource.TestCheckResourceAttr(resourceName, "health_check.#", "1"), resource.TestCheckResourceAttr(resourceName, "health_check.0.path", "/health2"), @@ -1596,7 +1597,7 @@ func TestAccELBV2TargetGroup_Stickiness_updateStickinessType(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "stickiness.#", "1"), resource.TestCheckResourceAttr(resourceName, "stickiness.0.enabled", "true"), resource.TestCheckResourceAttr(resourceName, "stickiness.0.type", "lb_cookie"), - resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_name", "Cookie"), + resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_name", ""), resource.TestCheckResourceAttr(resourceName, "stickiness.0.cookie_duration", "10000"), resource.TestCheckResourceAttr(resourceName, "health_check.#", "1"), resource.TestCheckResourceAttr(resourceName, "health_check.0.path", "/health2"), From 36f3c805c9297e1ce1cbf11aa974c02e5383f52e Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 11:00:07 -0500 Subject: [PATCH 243/438] Tweak CHANGELOG entry. 
--- .changelog/31436.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changelog/31436.txt b/.changelog/31436.txt index c36d19086b4..cd5e71e36c0 100644 --- a/.changelog/31436.txt +++ b/.changelog/31436.txt @@ -1,3 +1,3 @@ ```release-note:bug -resource/aws_lb_target_group: Persist `stickiness.app_cookie.cookie_name` across changes between app_cookie and lb_cookie ALB stickiness +resource/aws_lb_target_group: Fix diff on `stickiness.cookie_name` when `stickiness.type` is `lb_cookie` ``` \ No newline at end of file From cbd209e22b6051b9642b51ab96166336794cc58b Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 14 Dec 2023 20:26:58 -0500 Subject: [PATCH 244/438] autoflex: Add block key map --- internal/framework/flex/autoflex.go | 5 +++++ internal/framework/flex/autoflex_test.go | 23 +++++++++++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/internal/framework/flex/autoflex.go b/internal/framework/flex/autoflex.go index 69cd93c72d5..fda37eb9b05 100644 --- a/internal/framework/flex/autoflex.go +++ b/internal/framework/flex/autoflex.go @@ -18,6 +18,7 @@ type ResourcePrefixCtxKey string const ( ResourcePrefix ResourcePrefixCtxKey = "RESOURCE_PREFIX" ResourcePrefixRecurse ResourcePrefixCtxKey = "RESOURCE_PREFIX_RECURSE" + BlockKeyMap = "TFBlockKeyMap" ) // Expand = TF --> AWS @@ -94,6 +95,10 @@ func autoFlexConvertStruct(ctx context.Context, from any, to any, flexer autoFle if fieldName == "Tags" { continue // Resource tags are handled separately. } + if fieldName == BlockKeyMap { + continue + } + toFieldVal := findFieldFuzzy(ctx, fieldName, valTo, valFrom) if !toFieldVal.IsValid() { continue // Corresponding field not found in to. 
diff --git a/internal/framework/flex/autoflex_test.go b/internal/framework/flex/autoflex_test.go index 0c86261d0d6..491874db027 100644 --- a/internal/framework/flex/autoflex_test.go +++ b/internal/framework/flex/autoflex_test.go @@ -297,3 +297,26 @@ type TestFlexTF18 struct { Field5 fwtypes.MapValueOf[types.String] `tfsdk:"field5"` Field6 fwtypes.MapValueOf[types.String] `tfsdk:"field6"` } + +type TestFlexBlockKeyMapTF01 struct { + BlockMap fwtypes.ListNestedObjectValueOf[TestFlexBlockKeyMapTF02] `tfsdk:"block_map"` +} + +type TestFlexBlockKeyMapTF02 struct { + TFBlockKeyMap types.String `tfsdk:"block_key_map"` + Attr1 types.String `tfsdk:"attr1"` + Attr2 types.String `tfsdk:"attr2"` +} + +type TestFlexBlockKeyMapAWS01 struct { + BlockMap map[string]TestFlexBlockKeyMapAWS02 +} + +type TestFlexBlockKeyMapAWS02 struct { + Attr1 string + Attr2 string +} + +type TestFlexBlockKeyMapAWS03 struct { + BlockMap map[string]*TestFlexBlockKeyMapAWS02 +} From 9dfa74c25eb60906709728dfa0d139b6fd3d289c Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 14 Dec 2023 20:27:51 -0500 Subject: [PATCH 245/438] autoflex/expand: Allow expanding block key maps --- internal/framework/flex/auto_expand.go | 92 ++++++++++++++++++++- internal/framework/flex/auto_expand_test.go | 90 ++++++++++++++++++++ 2 files changed, 181 insertions(+), 1 deletion(-) diff --git a/internal/framework/flex/auto_expand.go b/internal/framework/flex/auto_expand.go index 9709ff6bb9f..36b8d10fe1e 100644 --- a/internal/framework/flex/auto_expand.go +++ b/internal/framework/flex/auto_expand.go @@ -525,11 +525,27 @@ func (expander autoExpander) nestedObject(ctx context.Context, vFrom fwtypes.Nes return diags } + case reflect.Map: + switch tElem := tTo.Elem(); tElem.Kind() { + case reflect.Struct: + // + // types.List(OfObject) -> map[string]struct + // + diags.Append(expander.nestedKeyObjectToMap(ctx, vFrom, tTo, tElem, vTo)...) 
+ return diags + case reflect.Ptr: + // + // types.List(OfObject) -> map[string]*struct + // + diags.Append(expander.nestedKeyObjectToMap(ctx, vFrom, tTo, tElem, vTo)...) + return diags + } + case reflect.Slice: switch tElem := tTo.Elem(); tElem.Kind() { case reflect.Struct: // - // types.List(OfObject) -> []struct. + // types.List(OfObject) -> []struct // diags.Append(expander.nestedObjectToSlice(ctx, vFrom, tTo, tElem, vTo)...) return diags @@ -614,6 +630,51 @@ func (expander autoExpander) nestedObjectToSlice(ctx context.Context, vFrom fwty return diags } +// nestedKeyObjectToMap copies a Plugin Framework NestedObjectValue to a compatible AWS API map[string]struct value. +func (expander autoExpander) nestedKeyObjectToMap(ctx context.Context, vFrom fwtypes.NestedObjectValue, tSlice, tElem reflect.Type, vTo reflect.Value) diag.Diagnostics { + var diags diag.Diagnostics + + // Get the nested Objects as a slice. + from, d := vFrom.ToObjectSlice(ctx) + diags.Append(d...) + if diags.HasError() { + return diags + } + + if tElem.Kind() == reflect.Ptr { + tElem = tElem.Elem() + } + + // Create a new target slice and expand each element. + f := reflect.ValueOf(from) + m := reflect.MakeMap(vTo.Type()) + for i := 0; i < f.Len(); i++ { + // Create a new target structure and walk its fields. + target := reflect.New(tElem) + diags.Append(autoFlexConvertStruct(ctx, f.Index(i).Interface(), target.Interface(), expander)...) + if diags.HasError() { + return diags + } + + key, d := blockKeyMap(ctx, f.Index(i).Interface()) + diags.Append(d...) + if diags.HasError() { + return diags + } + + // Set value (or pointer) in the target map. + if vTo.Type().Elem().Kind() == reflect.Struct { + m.SetMapIndex(key, target.Elem()) + } else { + m.SetMapIndex(key, target) + } + } + + vTo.Set(m) + + return diags +} + // objectMap copies a Plugin Framework ObjectMapValue value to a compatible AWS API value. 
func (expander autoExpander) objectMap(ctx context.Context, vFrom fwtypes.ObjectMapValue, vTo reflect.Value) diag.Diagnostics { var diags diag.Diagnostics @@ -684,3 +745,32 @@ func (expander autoExpander) mappedObjectToStruct(ctx context.Context, vFrom fwt return diags } + +// blockKeyMap takes a struct and extracts the value of the `key` +func blockKeyMap(ctx context.Context, from any) (reflect.Value, diag.Diagnostics) { + var diags diag.Diagnostics + + valFrom := reflect.ValueOf(from) + if kind := valFrom.Kind(); kind == reflect.Ptr { + valFrom = valFrom.Elem() + } + + for i, typFrom := 0, valFrom.Type(); i < typFrom.NumField(); i++ { + field := typFrom.Field(i) + if field.PkgPath != "" { + continue // Skip unexported fields. + } + + // go from StringValue to string + if field.Name == BlockKeyMap { + if v, ok := valFrom.Field(i).Interface().(basetypes.StringValue); ok { + return reflect.ValueOf(v.ValueString()), diags + } + return valFrom.Field(i), diags + } + } + + diags.AddError("AutoFlEx", fmt.Sprintf("unable to find map block key (%s)", BlockKeyMap)) + + return reflect.Zero(reflect.TypeOf("")), diags +} diff --git a/internal/framework/flex/auto_expand_test.go b/internal/framework/flex/auto_expand_test.go index e5b8c39aa22..4781197f00f 100644 --- a/internal/framework/flex/auto_expand_test.go +++ b/internal/framework/flex/auto_expand_test.go @@ -575,6 +575,96 @@ func TestExpandGeneric(t *testing.T) { }, }, }, + { + TestName: "block key map", + Source: &TestFlexBlockKeyMapTF01{ + BlockMap: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexBlockKeyMapTF02](ctx, []TestFlexBlockKeyMapTF02{ + { + TFBlockKeyMap: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + { + TFBlockKeyMap: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), + }, + }), + }, + Target: &TestFlexBlockKeyMapAWS01{}, + WantTarget: &TestFlexBlockKeyMapAWS01{ + BlockMap: map[string]TestFlexBlockKeyMapAWS02{ + "x": 
{ + Attr1: "a", + Attr2: "b", + }, + "y": { + Attr1: "c", + Attr2: "d", + }, + }, + }, + }, + { + TestName: "block key map ptr source", + Source: &TestFlexBlockKeyMapTF01{ + BlockMap: fwtypes.NewListNestedObjectValueOfSlice(ctx, []*TestFlexBlockKeyMapTF02{ + { + TFBlockKeyMap: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + { + TFBlockKeyMap: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), + }, + }), + }, + Target: &TestFlexBlockKeyMapAWS01{}, + WantTarget: &TestFlexBlockKeyMapAWS01{ + BlockMap: map[string]TestFlexBlockKeyMapAWS02{ + "x": { + Attr1: "a", + Attr2: "b", + }, + "y": { + Attr1: "c", + Attr2: "d", + }, + }, + }, + }, + { + TestName: "block key map ptr both", + Source: &TestFlexBlockKeyMapTF01{ + BlockMap: fwtypes.NewListNestedObjectValueOfSlice(ctx, []*TestFlexBlockKeyMapTF02{ + { + TFBlockKeyMap: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + { + TFBlockKeyMap: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), + }, + }), + }, + Target: &TestFlexBlockKeyMapAWS03{}, + WantTarget: &TestFlexBlockKeyMapAWS03{ + BlockMap: map[string]*TestFlexBlockKeyMapAWS02{ + "x": { + Attr1: "a", + Attr2: "b", + }, + "y": { + Attr1: "c", + Attr2: "d", + }, + }, + }, + }, { TestName: "complex nesting", Source: &TestFlexComplexNestTF01{ From 9182ec4523a5e88fb51f267e19da68ed4defc7e6 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 14 Dec 2023 20:28:09 -0500 Subject: [PATCH 246/438] autoflex/flatten: Allow flattening block key maps --- internal/framework/flex/auto_flatten.go | 129 +++++++++++++++++++ internal/framework/flex/auto_flatten_test.go | 90 +++++++++++++ 2 files changed, 219 insertions(+) diff --git a/internal/framework/flex/auto_flatten.go b/internal/framework/flex/auto_flatten.go index 36859d08985..898888243f4 100644 --- a/internal/framework/flex/auto_flatten.go +++ 
b/internal/framework/flex/auto_flatten.go @@ -508,6 +508,14 @@ func (flattener autoFlattener) map_(ctx context.Context, vFrom reflect.Value, tT switch tMapElem := vFrom.Type().Elem(); tMapElem.Kind() { case reflect.Struct: switch tTo := tTo.(type) { + case basetypes.ListTypable: + // + // map[string]struct -> fwtypes.ListNestedObjectOf[Object] + // + if tTo, ok := tTo.(fwtypes.NestedObjectType); ok { + diags.Append(flattener.structMapToObjectList(ctx, vFrom, tTo, vTo)...) + return diags + } case basetypes.MapTypable: // // map[string]struct -> fwtypes.ObjectMapOf[Object] @@ -565,8 +573,21 @@ func (flattener autoFlattener) map_(ctx context.Context, vFrom reflect.Value, tT diags.Append(flattener.structMapToObjectMap(ctx, vFrom, tTo, vTo)...) return diags } + + if tTo, ok := tTo.(fwtypes.NestedObjectType); ok { + diags.Append(flattener.structMapToObjectList(ctx, vFrom, tTo, vTo)...) + return diags + } case reflect.String: switch tTo := tTo.(type) { + case basetypes.ListTypable: + // + // map[string]struct -> fwtypes.ListNestedObjectOf[Object] + // + if tTo, ok := tTo.(fwtypes.NestedObjectType); ok { + diags.Append(flattener.structMapToObjectList(ctx, vFrom, tTo, vTo)...) + return diags + } case basetypes.MapTypable: // // map[string]*string -> types.Map(OfString). @@ -671,6 +692,76 @@ func (flattener autoFlattener) structMapToObjectMap(ctx context.Context, vFrom r return diags } +func (flattener autoFlattener) structMapToObjectList(ctx context.Context, vFrom reflect.Value, tTo fwtypes.NestedObjectType, vTo reflect.Value) diag.Diagnostics { + var diags diag.Diagnostics + + if vFrom.IsNil() { + val, d := tTo.NullValue(ctx) + diags.Append(d...) + if diags.HasError() { + return diags + } + + vTo.Set(reflect.ValueOf(val)) + return diags + } + + n := vFrom.Len() + to, d := tTo.NewObjectSlice(ctx, n, n) + diags.Append(d...) 
+ if diags.HasError() { + return diags + } + + t := reflect.ValueOf(to) + + //tStruct := t.Type().Elem() + //if tStruct.Kind() == reflect.Ptr { + // tStruct = tStruct.Elem() + //} + + i := 0 + for _, key := range vFrom.MapKeys() { + //target := reflect.New(tStruct) + target, d := tTo.NewObjectPtr(ctx) + diags.Append(d...) + if diags.HasError() { + return diags + } + + fromInterface := vFrom.MapIndex(key).Interface() + if vFrom.MapIndex(key).Kind() == reflect.Ptr { + fromInterface = vFrom.MapIndex(key).Elem().Interface() + } + + diags.Append(autoFlexConvertStruct(ctx, fromInterface, target, flattener)...) + if diags.HasError() { + return diags + } + + d = blockKeyMapSet(ctx, target, key.String()) + diags.Append(d...) + + t.Index(i).Set(reflect.ValueOf(target)) + i++ + //if t.Type().Elem().Kind() == reflect.Struct { + // t.SetMapIndex(key, target.Elem()) + //} else { + // t.SetMapIndex(key, target) + //} + } + + val, d := tTo.ValueFromObjectSlice(ctx, to) + diags.Append(d...) + if diags.HasError() { + return diags + } + + vTo.Set(reflect.ValueOf(val)) + + return diags +} + // structToNestedObject copies an AWS API struct value to a compatible Plugin Framework NestedObjectValue value. 
func (flattener autoFlattener) structToNestedObject(ctx context.Context, vFrom reflect.Value, isNullFrom bool, tTo fwtypes.NestedObjectType, vTo reflect.Value) diag.Diagnostics { var diags diag.Diagnostics @@ -758,3 +849,41 @@ func (flattener autoFlattener) sliceOfStructNestedObject(ctx context.Context, vF vTo.Set(reflect.ValueOf(val)) return diags } + +// blockKeyMapSet takes a struct and assigns the value of the `key` +func blockKeyMapSet(ctx context.Context, to any, key string) diag.Diagnostics { + var diags diag.Diagnostics + + valTo := reflect.ValueOf(to) + if kind := valTo.Kind(); kind == reflect.Ptr { + valTo = valTo.Elem() + } + + if valTo.Kind() != reflect.Struct { + diags.AddError("AutoFlEx", fmt.Sprintf("wrong type (%T), expected struct", valTo)) + return diags + } + + for i, typTo := 0, valTo.Type(); i < typTo.NumField(); i++ { + field := typTo.Field(i) + if field.PkgPath != "" { + continue // Skip unexported fields. + } + + // go to StringValue to string + if field.Name != BlockKeyMap { + continue + } + + if _, ok := valTo.Field(i).Interface().(basetypes.StringValue); ok { + valTo.Field(i).Set(reflect.ValueOf(basetypes.NewStringValue(key))) + return diags + } + + return diags + } + + diags.AddError("AutoFlEx", fmt.Sprintf("unable to find map block key (%s)", BlockKeyMap)) + + return diags +} diff --git a/internal/framework/flex/auto_flatten_test.go b/internal/framework/flex/auto_flatten_test.go index a5a2b9f424b..6115d01af4e 100644 --- a/internal/framework/flex/auto_flatten_test.go +++ b/internal/framework/flex/auto_flatten_test.go @@ -747,6 +747,96 @@ func TestFlattenGeneric(t *testing.T) { }), }, }, + { + TestName: "block key map", + Source: &TestFlexBlockKeyMapAWS01{ + BlockMap: map[string]TestFlexBlockKeyMapAWS02{ + "x": { + Attr1: "a", + Attr2: "b", + }, + "y": { + Attr1: "c", + Attr2: "d", + }, + }, + }, + Target: &TestFlexBlockKeyMapTF01{}, + WantTarget: &TestFlexBlockKeyMapTF01{ + BlockMap: 
fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexBlockKeyMapTF02](ctx, []TestFlexBlockKeyMapTF02{ + { + TFBlockKeyMap: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + { + TFBlockKeyMap: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), + }, + }), + }, + }, + { + TestName: "block key map ptr source", + Source: &TestFlexBlockKeyMapAWS03{ + BlockMap: map[string]*TestFlexBlockKeyMapAWS02{ + "x": { + Attr1: "a", + Attr2: "b", + }, + "y": { + Attr1: "c", + Attr2: "d", + }, + }, + }, + Target: &TestFlexBlockKeyMapTF01{}, + WantTarget: &TestFlexBlockKeyMapTF01{ + BlockMap: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexBlockKeyMapTF02](ctx, []TestFlexBlockKeyMapTF02{ + { + TFBlockKeyMap: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + { + TFBlockKeyMap: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), + }, + }), + }, + }, + { + TestName: "block key map ptr both", + Source: &TestFlexBlockKeyMapAWS03{ + BlockMap: map[string]*TestFlexBlockKeyMapAWS02{ + "x": { + Attr1: "a", + Attr2: "b", + }, + "y": { + Attr1: "c", + Attr2: "d", + }, + }, + }, + Target: &TestFlexBlockKeyMapTF01{}, + WantTarget: &TestFlexBlockKeyMapTF01{ + BlockMap: fwtypes.NewListNestedObjectValueOfSlice(ctx, []*TestFlexBlockKeyMapTF02{ + { + TFBlockKeyMap: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + { + TFBlockKeyMap: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), + }, + }), + }, + }, { TestName: "complex nesting", Source: &TestFlexComplexNestAWS01{ From 73e0f43d8759cc04b5da9e9d2c5a2b508b951c81 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Thu, 14 Dec 2023 20:32:40 -0500 Subject: [PATCH 247/438] autoflex/flatten: Fix comment --- internal/framework/flex/auto_flatten.go | 1 - 1 file changed, 1 deletion(-) diff --git 
a/internal/framework/flex/auto_flatten.go b/internal/framework/flex/auto_flatten.go index 898888243f4..0e5450718c5 100644 --- a/internal/framework/flex/auto_flatten.go +++ b/internal/framework/flex/auto_flatten.go @@ -870,7 +870,6 @@ func blockKeyMapSet(ctx context.Context, to any, key string) diag.Diagnostics { continue // Skip unexported fields. } - // go to StringValue to string if field.Name != BlockKeyMap { continue } From 6c80e0370084ab8d93aa9dcde62f77e9941e64ea Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 11:13:32 -0500 Subject: [PATCH 248/438] r/aws_lb_target_group: Tidy up 'flattenTargetGroupHealthStateAttributes'. --- internal/service/elbv2/target_group.go | 88 ++++++++++++-------------- 1 file changed, 42 insertions(+), 46 deletions(-) diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index a0a8d0ce229..92e49af41e6 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -459,6 +459,10 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta attributes = append(attributes, expandTargetGroupStickinessAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) } + if v, ok := d.GetOk("target_health_state"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + attributes = append(attributes, expandTargetGroupHealthStateAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) 
+ } + if v, null, _ := nullable.Int(d.Get("deregistration_delay").(string)).Value(); !null { attributes = append(attributes, &elbv2.TargetGroupAttribute{ Key: aws.String("deregistration_delay.timeout_seconds"), @@ -525,22 +529,6 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta ) } } - - // Only supported for TCP & TLS protocols - if v, ok := d.Get("protocol").(string); ok { - if v == elbv2.ProtocolEnumTcp || v == elbv2.ProtocolEnumTls { - if v, ok := d.GetOk("target_health_state"); ok && len(v.([]interface{})) > 0 { - targetHealthStateBlock := v.([]interface{}) - targetHealthState := targetHealthStateBlock[0].(map[string]interface{}) - attributes = append(attributes, - &elbv2.TargetGroupAttribute{ - Key: aws.String("target_health_state.unhealthy.connection_termination.enabled"), - Value: aws.String(strconv.FormatBool(targetHealthState["enable_unhealthy_connection_termination"].(bool))), - }, - ) - } - } - } case elbv2.TargetTypeEnumLambda: if v, ok := d.GetOk("lambda_multi_value_headers_enabled"); ok { attributes = append(attributes, &elbv2.TargetGroupAttribute{ @@ -680,12 +668,8 @@ func resourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta i return sdkdiag.AppendErrorf(diags, "setting stickiness: %s", err) } - targetHealthStateAttr, err := flattenTargetHealthState(attributes) - if err != nil { - return sdkdiag.AppendErrorf(diags, "flattening target health state: %s", err) - } - if err := d.Set("target_health_state", targetHealthStateAttr); err != nil { - return sdkdiag.AppendErrorf(diags, "setting target health state: %s", err) + if err := d.Set("target_health_state", []interface{}{flattenTargetGroupHealthStateAttributes(attributes, protocol)}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting target_health_state: %s", err) } // Set target failover attributes for GWLB @@ -768,6 +752,12 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta } } + if 
d.HasChange("target_health_state") { + if v, ok := d.GetOk("target_health_state"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + attributes = append(attributes, expandTargetGroupHealthStateAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) + } + } + if d.HasChange("deregistration_delay") { if v, null, _ := nullable.Int(d.Get("deregistration_delay").(string)).Value(); !null { attributes = append(attributes, &elbv2.TargetGroupAttribute{ @@ -819,18 +809,6 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta }) } - if d.HasChange("target_health_state") { - targetHealthStateBlock := d.Get("target_health_state").([]interface{}) - if len(targetHealthStateBlock) == 1 { - targetHealthState := targetHealthStateBlock[0].(map[string]interface{}) - attributes = append(attributes, - &elbv2.TargetGroupAttribute{ - Key: aws.String("target_health_state.unhealthy.connection_termination.enabled"), - Value: aws.String(strconv.FormatBool(targetHealthState["enable_unhealthy_connection_termination"].(bool))), - }) - } - } - if d.HasChange("target_failover") { failoverBlock := d.Get("target_failover").([]interface{}) if len(failoverBlock) == 1 { @@ -1131,25 +1109,43 @@ func flattenTargetGroupStickinessAttributes(apiObjects []*elbv2.TargetGroupAttri return tfMap } -func flattenTargetHealthState(attributes []*elbv2.TargetGroupAttribute) ([]interface{}, error) { - if len(attributes) == 0 { - return []interface{}{}, nil +func expandTargetGroupHealthStateAttributes(tfMap map[string]interface{}, protocol string) []*elbv2.TargetGroupAttribute { + if tfMap == nil { + return nil } - m := make(map[string]interface{}) + var apiObjects []*elbv2.TargetGroupAttribute - for _, attr := range attributes { - switch aws.StringValue(attr.Key) { - case "target_health_state.unhealthy.connection_termination.enabled": - enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return nil, fmt.Errorf("converting 
target_health_state.unhealthy.connection_termination to bool: %s", aws.StringValue(attr.Value)) + switch protocol { + case elbv2.ProtocolEnumTcp, elbv2.ProtocolEnumTls: + apiObjects = append(apiObjects, + &elbv2.TargetGroupAttribute{ + Key: aws.String(targetGroupAttributeTargetHealthStateUnhealthyConnectionTerminationEnabled), + Value: flex.BoolValueToString(tfMap["enable_unhealthy_connection_termination"].(bool)), + }) + } + + return apiObjects +} + +func flattenTargetGroupHealthStateAttributes(apiObjects []*elbv2.TargetGroupAttribute, protocol string) map[string]interface{} { + if len(apiObjects) == 0 { + return nil + } + + tfMap := map[string]interface{}{} + + switch protocol { + case elbv2.ProtocolEnumTcp, elbv2.ProtocolEnumTls: + for _, apiObject := range apiObjects { + switch k, v := aws.StringValue(apiObject.Key), apiObject.Value; k { + case targetGroupAttributeTargetHealthStateUnhealthyConnectionTerminationEnabled: + tfMap["enable_unhealthy_connection_termination"] = flex.StringToBoolValue(v) } - m["enable_unhealthy_connection_termination"] = enabled } } - return []interface{}{m}, nil + return tfMap } func flattenTargetGroupFailover(attributes []*elbv2.TargetGroupAttribute) []interface{} { From 5c710890bd3360741257ecfaf66c726d02d6606f Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 11:24:02 -0500 Subject: [PATCH 249/438] r/aws_lb_target_group: Tidy up 'flattenTargetGroupTargetFailoverAttributes'. 
--- internal/service/elbv2/target_group.go | 144 ++++++++++++------------- 1 file changed, 69 insertions(+), 75 deletions(-) diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index 92e49af41e6..6418067aba7 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -459,8 +459,12 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta attributes = append(attributes, expandTargetGroupStickinessAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) } + if v, ok := d.GetOk("target_failover"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + attributes = append(attributes, expandTargetGroupTargetFailoverAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) + } + if v, ok := d.GetOk("target_health_state"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - attributes = append(attributes, expandTargetGroupHealthStateAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) + attributes = append(attributes, expandTargetGroupTargetHealthStateAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) 
} if v, null, _ := nullable.Int(d.Get("deregistration_delay").(string)).Value(); !null { @@ -511,24 +515,6 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta Value: aws.String(fmt.Sprintf("%d", v.(int))), }) } - - // Only supported for GWLB - if protocol == elbv2.ProtocolEnumGeneve { - if v, ok := d.GetOk("target_failover"); ok { - failoverBlock := v.([]interface{}) - failover := failoverBlock[0].(map[string]interface{}) - attributes = append(attributes, - &elbv2.TargetGroupAttribute{ - Key: aws.String("target_failover.on_deregistration"), - Value: aws.String(failover["on_deregistration"].(string)), - }, - &elbv2.TargetGroupAttribute{ - Key: aws.String("target_failover.on_unhealthy"), - Value: aws.String(failover["on_unhealthy"].(string)), - }, - ) - } - } case elbv2.TargetTypeEnumLambda: if v, ok := d.GetOk("lambda_multi_value_headers_enabled"); ok { attributes = append(attributes, &elbv2.TargetGroupAttribute{ @@ -621,6 +607,18 @@ func resourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta i return sdkdiag.AppendErrorf(diags, "reading ELBv2 Target Group (%s) attributes: %s", d.Id(), err) } + if err := d.Set("stickiness", []interface{}{flattenTargetGroupStickinessAttributes(attributes, protocol)}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting stickiness: %s", err) + } + + if err := d.Set("target_failover", []interface{}{flattenTargetGroupTargetFailoverAttributes(attributes, protocol)}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting target_failover: %s", err) + } + + if err := d.Set("target_health_state", []interface{}{flattenTargetGroupTargetHealthStateAttributes(attributes, protocol)}); err != nil { + return sdkdiag.AppendErrorf(diags, "setting target_health_state: %s", err) + } + for _, attr := range attributes { switch aws.StringValue(attr.Key) { case "deregistration_delay.timeout_seconds": @@ -664,24 +662,6 @@ func resourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, 
meta i } } - if err := d.Set("stickiness", []interface{}{flattenTargetGroupStickinessAttributes(attributes, protocol)}); err != nil { - return sdkdiag.AppendErrorf(diags, "setting stickiness: %s", err) - } - - if err := d.Set("target_health_state", []interface{}{flattenTargetGroupHealthStateAttributes(attributes, protocol)}); err != nil { - return sdkdiag.AppendErrorf(diags, "setting target_health_state: %s", err) - } - - // Set target failover attributes for GWLB - targetFailoverAttr := flattenTargetGroupFailover(attributes) - if err != nil { - return sdkdiag.AppendErrorf(diags, "flattening target failover: %s", err) - } - - if err := d.Set("target_failover", targetFailoverAttr); err != nil { - return sdkdiag.AppendErrorf(diags, "setting target failover: %s", err) - } - return diags } @@ -752,9 +732,15 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta } } + if d.HasChange("target_failover") { + if v, ok := d.GetOk("target_failover"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + attributes = append(attributes, expandTargetGroupTargetFailoverAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) + } + } + if d.HasChange("target_health_state") { if v, ok := d.GetOk("target_health_state"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - attributes = append(attributes, expandTargetGroupHealthStateAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) + attributes = append(attributes, expandTargetGroupTargetHealthStateAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) 
} } @@ -808,24 +794,6 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta Value: aws.String(d.Get("load_balancing_cross_zone_enabled").(string)), }) } - - if d.HasChange("target_failover") { - failoverBlock := d.Get("target_failover").([]interface{}) - if len(failoverBlock) == 1 { - failover := failoverBlock[0].(map[string]interface{}) - attributes = append(attributes, - &elbv2.TargetGroupAttribute{ - Key: aws.String("target_failover.on_deregistration"), - Value: aws.String(failover["on_deregistration"].(string)), - }, - &elbv2.TargetGroupAttribute{ - Key: aws.String("target_failover.on_unhealthy"), - Value: aws.String(failover["on_unhealthy"].(string)), - }, - ) - } - } - case elbv2.TargetTypeEnumLambda: if d.HasChange("lambda_multi_value_headers_enabled") { attributes = append(attributes, &elbv2.TargetGroupAttribute{ @@ -1109,7 +1077,7 @@ func flattenTargetGroupStickinessAttributes(apiObjects []*elbv2.TargetGroupAttri return tfMap } -func expandTargetGroupHealthStateAttributes(tfMap map[string]interface{}, protocol string) []*elbv2.TargetGroupAttribute { +func expandTargetGroupTargetFailoverAttributes(tfMap map[string]interface{}, protocol string) []*elbv2.TargetGroupAttribute { if tfMap == nil { return nil } @@ -1117,18 +1085,22 @@ func expandTargetGroupHealthStateAttributes(tfMap map[string]interface{}, protoc var apiObjects []*elbv2.TargetGroupAttribute switch protocol { - case elbv2.ProtocolEnumTcp, elbv2.ProtocolEnumTls: + case elbv2.ProtocolEnumGeneve: apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ - Key: aws.String(targetGroupAttributeTargetHealthStateUnhealthyConnectionTerminationEnabled), - Value: flex.BoolValueToString(tfMap["enable_unhealthy_connection_termination"].(bool)), + Key: aws.String(targetGroupAttributeTargetFailoverOnDeregistration), + Value: aws.String(tfMap["on_deregistration"].(string)), + }, + &elbv2.TargetGroupAttribute{ + Key: aws.String(targetGroupAttributeTargetFailoverOnUnhealthy), + 
Value: aws.String(tfMap["on_unhealthy"].(string)), }) } return apiObjects } -func flattenTargetGroupHealthStateAttributes(apiObjects []*elbv2.TargetGroupAttribute, protocol string) map[string]interface{} { +func flattenTargetGroupTargetFailoverAttributes(apiObjects []*elbv2.TargetGroupAttribute, protocol string) map[string]interface{} { if len(apiObjects) == 0 { return nil } @@ -1136,11 +1108,13 @@ func flattenTargetGroupHealthStateAttributes(apiObjects []*elbv2.TargetGroupAttr tfMap := map[string]interface{}{} switch protocol { - case elbv2.ProtocolEnumTcp, elbv2.ProtocolEnumTls: + case elbv2.ProtocolEnumGeneve: for _, apiObject := range apiObjects { switch k, v := aws.StringValue(apiObject.Key), apiObject.Value; k { - case targetGroupAttributeTargetHealthStateUnhealthyConnectionTerminationEnabled: - tfMap["enable_unhealthy_connection_termination"] = flex.StringToBoolValue(v) + case targetGroupAttributeTargetFailoverOnDeregistration: + tfMap["on_deregistration"] = aws.StringValue(v) + case targetGroupAttributeTargetFailoverOnUnhealthy: + tfMap["on_unhealthy"] = aws.StringValue(v) } } } @@ -1148,23 +1122,43 @@ func flattenTargetGroupHealthStateAttributes(apiObjects []*elbv2.TargetGroupAttr return tfMap } -func flattenTargetGroupFailover(attributes []*elbv2.TargetGroupAttribute) []interface{} { - if len(attributes) == 0 { - return []interface{}{} +func expandTargetGroupTargetHealthStateAttributes(tfMap map[string]interface{}, protocol string) []*elbv2.TargetGroupAttribute { + if tfMap == nil { + return nil } - m := make(map[string]interface{}) + var apiObjects []*elbv2.TargetGroupAttribute - for _, attr := range attributes { - switch aws.StringValue(attr.Key) { - case "target_failover.on_deregistration": - m["on_deregistration"] = aws.StringValue(attr.Value) - case "target_failover.on_unhealthy": - m["on_unhealthy"] = aws.StringValue(attr.Value) + switch protocol { + case elbv2.ProtocolEnumTcp, elbv2.ProtocolEnumTls: + apiObjects = append(apiObjects, + 
&elbv2.TargetGroupAttribute{ + Key: aws.String(targetGroupAttributeTargetHealthStateUnhealthyConnectionTerminationEnabled), + Value: flex.BoolValueToString(tfMap["enable_unhealthy_connection_termination"].(bool)), + }) + } + + return apiObjects +} + +func flattenTargetGroupTargetHealthStateAttributes(apiObjects []*elbv2.TargetGroupAttribute, protocol string) map[string]interface{} { + if len(apiObjects) == 0 { + return nil + } + + tfMap := map[string]interface{}{} + + switch protocol { + case elbv2.ProtocolEnumTcp, elbv2.ProtocolEnumTls: + for _, apiObject := range apiObjects { + switch k, v := aws.StringValue(apiObject.Key), apiObject.Value; k { + case targetGroupAttributeTargetHealthStateUnhealthyConnectionTerminationEnabled: + tfMap["enable_unhealthy_connection_termination"] = flex.StringToBoolValue(v) + } } } - return []interface{}{m} + return tfMap } func resourceTargetGroupCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta any) error { From d04f929845e551669413f1a9aed54bea0822a354 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 11:56:36 -0500 Subject: [PATCH 250/438] Add 'flex.Int64ValueToString'. --- internal/flex/flex.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/internal/flex/flex.go b/internal/flex/flex.go index 34c417583b1..171c2e120bc 100644 --- a/internal/flex/flex.go +++ b/internal/flex/flex.go @@ -305,6 +305,11 @@ func IntValueToString(v int) *string { return aws.String(strconv.Itoa(v)) } +// Int64ValueToString converts a Go int64 value to a string pointer. +func Int64ValueToString(v int64) *string { + return aws.String(strconv.FormatInt(v, 10)) +} + // StringToIntValue converts a string pointer to a Go int value. // Invalid integer strings are converted to 0. func StringToIntValue(v *string) int { From 2ab3f1371ef67b8649e4ee5702c279a2d6cf450c Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 12:04:20 -0500 Subject: [PATCH 251/438] r/aws_lb_target_group: Add and use 'targetGroupAttributeMap'. 
--- internal/service/elbv2/target_group.go | 321 +++++++++++++------------ 1 file changed, 165 insertions(+), 156 deletions(-) diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index 6418067aba7..d523f6edfd9 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -31,6 +31,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/types/nullable" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" + "golang.org/x/exp/slices" ) // @SDKResource("aws_alb_target_group", name="Target Group") @@ -466,64 +467,10 @@ func resourceTargetGroupCreate(ctx context.Context, d *schema.ResourceData, meta if v, ok := d.GetOk("target_health_state"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { attributes = append(attributes, expandTargetGroupTargetHealthStateAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) } - - if v, null, _ := nullable.Int(d.Get("deregistration_delay").(string)).Value(); !null { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("deregistration_delay.timeout_seconds"), - Value: aws.String(fmt.Sprintf("%d", v)), - }) - } - - if v, ok := d.GetOk("load_balancing_algorithm_type"); ok { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("load_balancing.algorithm.type"), - Value: aws.String(v.(string)), - }) - } - - if v, ok := d.GetOk("load_balancing_cross_zone_enabled"); ok { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("load_balancing.cross_zone.enabled"), - Value: aws.String(v.(string)), - }) - } - - if v, ok := d.GetOk("preserve_client_ip"); ok { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("preserve_client_ip.enabled"), - Value: aws.String(v.(string)), - }) - } - - if v, ok := d.GetOk("proxy_protocol_v2"); ok { - attributes = 
append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("proxy_protocol_v2.enabled"), - Value: aws.String(strconv.FormatBool(v.(bool))), - }) - } - - if v, ok := d.GetOk("connection_termination"); ok { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("deregistration_delay.connection_termination.enabled"), - Value: aws.String(strconv.FormatBool(v.(bool))), - }) - } - - if v, ok := d.GetOk("slow_start"); ok { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("slow_start.duration_seconds"), - Value: aws.String(fmt.Sprintf("%d", v.(int))), - }) - } - case elbv2.TargetTypeEnumLambda: - if v, ok := d.GetOk("lambda_multi_value_headers_enabled"); ok { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("lambda.multi_value_headers.enabled"), - Value: aws.String(strconv.FormatBool(v.(bool))), - }) - } } + attributes = append(attributes, targetGroupAttributes.expand(d, targetType, false)...) + if len(attributes) > 0 { input := &elbv2.ModifyTargetGroupAttributesInput{ Attributes: attributes, @@ -619,48 +566,7 @@ func resourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta i return sdkdiag.AppendErrorf(diags, "setting target_health_state: %s", err) } - for _, attr := range attributes { - switch aws.StringValue(attr.Key) { - case "deregistration_delay.timeout_seconds": - d.Set("deregistration_delay", attr.Value) - case "lambda.multi_value_headers.enabled": - enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "converting lambda.multi_value_headers.enabled to bool: %s", aws.StringValue(attr.Value)) - } - d.Set("lambda_multi_value_headers_enabled", enabled) - case "proxy_protocol_v2.enabled": - enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "converting proxy_protocol_v2.enabled to bool: %s", aws.StringValue(attr.Value)) - } - 
d.Set("proxy_protocol_v2", enabled) - case "deregistration_delay.connection_termination.enabled": - enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "converting deregistration_delay.connection_termination.enabled to bool: %s", aws.StringValue(attr.Value)) - } - d.Set("connection_termination", enabled) - case "slow_start.duration_seconds": - slowStart, err := strconv.Atoi(aws.StringValue(attr.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "converting slow_start.duration_seconds to int: %s", aws.StringValue(attr.Value)) - } - d.Set("slow_start", slowStart) - case "load_balancing.algorithm.type": - loadBalancingAlgorithm := aws.StringValue(attr.Value) - d.Set("load_balancing_algorithm_type", loadBalancingAlgorithm) - case "load_balancing.cross_zone.enabled": - loadBalancingCrossZoneEnabled := aws.StringValue(attr.Value) - d.Set("load_balancing_cross_zone_enabled", loadBalancingCrossZoneEnabled) - case "preserve_client_ip.enabled": - _, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "converting preserve_client_ip.enabled to bool: %s", aws.StringValue(attr.Value)) - } - d.Set("preserve_client_ip", attr.Value) - } - } + targetGroupAttributes.flatten(d, targetType, attributes) return diags } @@ -743,66 +649,10 @@ func resourceTargetGroupUpdate(ctx context.Context, d *schema.ResourceData, meta attributes = append(attributes, expandTargetGroupTargetHealthStateAttributes(v.([]interface{})[0].(map[string]interface{}), protocol)...) 
} } - - if d.HasChange("deregistration_delay") { - if v, null, _ := nullable.Int(d.Get("deregistration_delay").(string)).Value(); !null { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("deregistration_delay.timeout_seconds"), - Value: aws.String(fmt.Sprintf("%d", v)), - }) - } - } - - if d.HasChange("slow_start") { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("slow_start.duration_seconds"), - Value: aws.String(fmt.Sprintf("%d", d.Get("slow_start").(int))), - }) - } - - if d.HasChange("proxy_protocol_v2") { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("proxy_protocol_v2.enabled"), - Value: aws.String(strconv.FormatBool(d.Get("proxy_protocol_v2").(bool))), - }) - } - - if d.HasChange("connection_termination") { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("deregistration_delay.connection_termination.enabled"), - Value: aws.String(strconv.FormatBool(d.Get("connection_termination").(bool))), - }) - } - - if d.HasChange("preserve_client_ip") { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("preserve_client_ip.enabled"), - Value: aws.String(d.Get("preserve_client_ip").(string)), - }) - } - - if d.HasChange("load_balancing_algorithm_type") { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("load_balancing.algorithm.type"), - Value: aws.String(d.Get("load_balancing_algorithm_type").(string)), - }) - } - - if d.HasChange("load_balancing_cross_zone_enabled") { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("load_balancing.cross_zone.enabled"), - Value: aws.String(d.Get("load_balancing_cross_zone_enabled").(string)), - }) - } - case elbv2.TargetTypeEnumLambda: - if d.HasChange("lambda_multi_value_headers_enabled") { - attributes = append(attributes, &elbv2.TargetGroupAttribute{ - Key: aws.String("lambda.multi_value_headers.enabled"), - 
Value: aws.String(strconv.FormatBool(d.Get("lambda_multi_value_headers_enabled").(bool))), - }) - } } + attributes = append(attributes, targetGroupAttributes.expand(d, targetType, true)...) + if len(attributes) > 0 { input := &elbv2.ModifyTargetGroupAttributesInput{ Attributes: attributes, @@ -837,6 +687,165 @@ func resourceTargetGroupDelete(ctx context.Context, d *schema.ResourceData, meta return diags } +type targetGroupAttributeInfo struct { + apiAttributeKey string + tfType schema.ValueType + tfNullableType schema.ValueType + targetTypesSupported []string +} + +type targetGroupAttributeMap map[string]targetGroupAttributeInfo + +var targetGroupAttributes = targetGroupAttributeMap(map[string]targetGroupAttributeInfo{ + "connection_termination": { + apiAttributeKey: targetGroupAttributeDeregistrationDelayConnectionTerminationEnabled, + tfType: schema.TypeBool, + targetTypesSupported: []string{elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp}, + }, + "deregistration_delay": { + apiAttributeKey: targetGroupAttributeDeregistrationDelayTimeoutSeconds, + tfType: schema.TypeString, + tfNullableType: schema.TypeInt, + targetTypesSupported: []string{elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp}, + }, + "lambda_multi_value_headers_enabled": { + apiAttributeKey: targetGroupAttributeLambdaMultiValueHeadersEnabled, + tfType: schema.TypeBool, + targetTypesSupported: []string{elbv2.TargetTypeEnumLambda}, + }, + "load_balancing_algorithm_type": { + apiAttributeKey: targetGroupAttributeLoadBalancingAlgorithmType, + tfType: schema.TypeString, + targetTypesSupported: []string{elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp}, + }, + "load_balancing_cross_zone_enabled": { + apiAttributeKey: targetGroupAttributeLoadBalancingCrossZoneEnabled, + tfType: schema.TypeString, + targetTypesSupported: []string{elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp}, + }, + "preserve_client_ip": { + apiAttributeKey: targetGroupAttributePreserveClientIPEnabled, + tfType: 
schema.TypeString, + tfNullableType: schema.TypeBool, + targetTypesSupported: []string{elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp}, + }, + "proxy_protocol_v2": { + apiAttributeKey: targetGroupAttributeProxyProtocolV2Enabled, + tfType: schema.TypeBool, + targetTypesSupported: []string{elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp}, + }, + "slow_start": { + apiAttributeKey: targetGroupAttributeSlowStartDurationSeconds, + tfType: schema.TypeInt, + targetTypesSupported: []string{elbv2.TargetTypeEnumInstance, elbv2.TargetTypeEnumIp}, + }, +}) + +func (m targetGroupAttributeMap) expand(d *schema.ResourceData, targetType string, update bool) []*elbv2.TargetGroupAttribute { + var apiObjects []*elbv2.TargetGroupAttribute + + for tfAttributeName, attributeInfo := range m { + if update && !d.HasChange(tfAttributeName) { + continue + } + + if !slices.Contains(attributeInfo.targetTypesSupported, targetType) { + continue + } + + switch v, nt, k := d.Get(tfAttributeName), attributeInfo.tfNullableType, aws.String(attributeInfo.apiAttributeKey); nt { + case schema.TypeBool: + v := v.(string) + if v, null, _ := nullable.Bool(v).Value(); !null { + apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ + Key: k, + Value: flex.BoolValueToString(v), + }) + } + case schema.TypeInt: + v := v.(string) + if v, null, _ := nullable.Int(v).Value(); !null { + apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ + Key: k, + Value: flex.Int64ValueToString(v), + }) + } + default: + switch attributeInfo.tfType { + case schema.TypeBool: + v := v.(bool) + apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ + Key: k, + Value: flex.BoolValueToString(v), + }) + case schema.TypeInt: + v := v.(int) + apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ + Key: k, + Value: flex.IntValueToString(v), + }) + case schema.TypeString: + if v := v.(string); v != "" { + apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ + Key: k, + Value: aws.String(v), + }) 
+ } + } + } + + switch v, t, k := d.Get(tfAttributeName), attributeInfo.tfType, aws.String(attributeInfo.apiAttributeKey); t { + case schema.TypeBool: + v := v.(bool) + apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ + Key: k, + Value: flex.BoolValueToString(v), + }) + case schema.TypeInt: + v := v.(int) + apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ + Key: k, + Value: flex.IntValueToString(v), + }) + case schema.TypeString: + if v := v.(string); v != "" { + apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ + Key: k, + Value: aws.String(v), + }) + } + } + } + + return apiObjects +} + +func (m targetGroupAttributeMap) flatten(d *schema.ResourceData, targetType string, apiObjects []*elbv2.TargetGroupAttribute) { + for tfAttributeName, attributeInfo := range m { + if !slices.Contains(attributeInfo.targetTypesSupported, targetType) { + continue + } + + k := attributeInfo.apiAttributeKey + i := slices.IndexFunc(apiObjects, func(v *elbv2.TargetGroupAttribute) bool { + return aws.StringValue(v.Key) == k + }) + + if i == -1 { + continue + } + + switch v, t := apiObjects[i].Value, attributeInfo.tfType; t { + case schema.TypeBool: + d.Set(tfAttributeName, flex.StringToBoolValue(v)) + case schema.TypeInt: + d.Set(tfAttributeName, flex.StringToIntValue(v)) + case schema.TypeString: + d.Set(tfAttributeName, v) + } + } +} + func FindTargetGroupByARN(ctx context.Context, conn *elbv2.ELBV2, arn string) (*elbv2.TargetGroup, error) { input := &elbv2.DescribeTargetGroupsInput{ TargetGroupArns: aws.StringSlice([]string{arn}), From 9512e32c73477c5945e72dc7ff097e8c4aa87e3b Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 15 Dec 2023 12:07:46 -0500 Subject: [PATCH 252/438] autoflex: Lint --- internal/framework/flex/auto_expand.go | 10 +++++----- internal/framework/flex/auto_flatten.go | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/internal/framework/flex/auto_expand.go b/internal/framework/flex/auto_expand.go index 
36b8d10fe1e..cfb464dacaf 100644 --- a/internal/framework/flex/auto_expand.go +++ b/internal/framework/flex/auto_expand.go @@ -531,13 +531,13 @@ func (expander autoExpander) nestedObject(ctx context.Context, vFrom fwtypes.Nes // // types.List(OfObject) -> map[string]struct // - diags.Append(expander.nestedKeyObjectToMap(ctx, vFrom, tTo, tElem, vTo)...) + diags.Append(expander.nestedKeyObjectToMap(ctx, vFrom, tElem, vTo)...) return diags case reflect.Ptr: // // types.List(OfObject) -> map[string]*struct // - diags.Append(expander.nestedKeyObjectToMap(ctx, vFrom, tTo, tElem, vTo)...) + diags.Append(expander.nestedKeyObjectToMap(ctx, vFrom, tElem, vTo)...) return diags } @@ -631,7 +631,7 @@ func (expander autoExpander) nestedObjectToSlice(ctx context.Context, vFrom fwty } // nestedKeyObjectToMap copies a Plugin Framework NestedObjectValue to a compatible AWS API map[string]struct value. -func (expander autoExpander) nestedKeyObjectToMap(ctx context.Context, vFrom fwtypes.NestedObjectValue, tSlice, tElem reflect.Type, vTo reflect.Value) diag.Diagnostics { +func (expander autoExpander) nestedKeyObjectToMap(ctx context.Context, vFrom fwtypes.NestedObjectValue, tElem reflect.Type, vTo reflect.Value) diag.Diagnostics { var diags diag.Diagnostics // Get the nested Objects as a slice. @@ -656,7 +656,7 @@ func (expander autoExpander) nestedKeyObjectToMap(ctx context.Context, vFrom fwt return diags } - key, d := blockKeyMap(ctx, f.Index(i).Interface()) + key, d := blockKeyMap(f.Index(i).Interface()) diags.Append(d...) 
if diags.HasError() { return diags @@ -747,7 +747,7 @@ func (expander autoExpander) mappedObjectToStruct(ctx context.Context, vFrom fwt } // blockKeyMap takes a struct and extracts the value of the `key` -func blockKeyMap(ctx context.Context, from any) (reflect.Value, diag.Diagnostics) { +func blockKeyMap(from any) (reflect.Value, diag.Diagnostics) { var diags diag.Diagnostics valFrom := reflect.ValueOf(from) diff --git a/internal/framework/flex/auto_flatten.go b/internal/framework/flex/auto_flatten.go index 0e5450718c5..e2fe57c538c 100644 --- a/internal/framework/flex/auto_flatten.go +++ b/internal/framework/flex/auto_flatten.go @@ -739,7 +739,7 @@ func (flattener autoFlattener) structMapToObjectList(ctx context.Context, vFrom return diags } - d = blockKeyMapSet(ctx, target, key.String()) + d = blockKeyMapSet(target, key.String()) diags.Append(d...) t.Index(i).Set(reflect.ValueOf(target)) @@ -851,7 +851,7 @@ func (flattener autoFlattener) sliceOfStructNestedObject(ctx context.Context, vF } // blockKeyMapSet takes a struct and assigns the value of the `key` -func blockKeyMapSet(ctx context.Context, to any, key string) diag.Diagnostics { +func blockKeyMapSet(to any, key string) diag.Diagnostics { var diags diag.Diagnostics valTo := reflect.ValueOf(to) From 6e1f3f4821cb034c86c76d2c8274671005785574 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 15 Dec 2023 12:16:14 -0500 Subject: [PATCH 253/438] autoflex: Sort slices for testing with cmp --- internal/framework/flex/auto_flatten_test.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/internal/framework/flex/auto_flatten_test.go b/internal/framework/flex/auto_flatten_test.go index 6115d01af4e..f7854197ef1 100644 --- a/internal/framework/flex/auto_flatten_test.go +++ b/internal/framework/flex/auto_flatten_test.go @@ -5,11 +5,13 @@ package flex import ( "context" + "fmt" "testing" "time" "github.com/aws/aws-sdk-go-v2/aws" "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" 
"github.com/hashicorp/terraform-plugin-framework/attr" "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-framework/types/basetypes" @@ -902,11 +904,13 @@ func TestFlattenGeneric(t *testing.T) { t.Errorf("gotErr = %v, wantErr = %v", gotErr, testCase.WantErr) } + less := func(a, b any) bool { return fmt.Sprint(a) < fmt.Sprint(b) } + if gotErr { if !testCase.WantErr { t.Errorf("err = %q", err) } - } else if diff := cmp.Diff(testCase.Target, testCase.WantTarget); diff != "" { + } else if diff := cmp.Diff(testCase.Target, testCase.WantTarget, cmpopts.SortSlices(less)); diff != "" { t.Errorf("unexpected diff (+wanted, -got): %s", diff) } }) From 13fc8771a2fdf8e213d2a31bcd679a70742a6079 Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Fri, 15 Dec 2023 11:29:36 -0600 Subject: [PATCH 254/438] add CHANGELOG entry --- .changelog/34848.txt | 3 +++ internal/service/dynamodb/status.go | 4 ++++ internal/service/dynamodb/table.go | 4 ++-- 3 files changed, 9 insertions(+), 2 deletions(-) create mode 100644 .changelog/34848.txt diff --git a/.changelog/34848.txt b/.changelog/34848.txt new file mode 100644 index 00000000000..14b3201a669 --- /dev/null +++ b/.changelog/34848.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_dynamodb_table: Fix error when waiting for snapshot to be created +``` \ No newline at end of file diff --git a/internal/service/dynamodb/status.go b/internal/service/dynamodb/status.go index b6c5d92c88d..ed06fafaf84 100644 --- a/internal/service/dynamodb/status.go +++ b/internal/service/dynamodb/status.go @@ -56,6 +56,10 @@ func statusImport(ctx context.Context, conn *dynamodb.DynamoDB, importArn string } output, err := conn.DescribeImportWithContext(ctx, describeImportInput) + if tfawserr.ErrCodeEquals(err, dynamodb.ErrCodeResourceNotFoundException) { + return nil, "", nil + } + if err != nil { return nil, "", err } diff --git a/internal/service/dynamodb/table.go b/internal/service/dynamodb/table.go index 
bdbe09fbffe..3c3746fc296 100644 --- a/internal/service/dynamodb/table.go +++ b/internal/service/dynamodb/table.go @@ -623,9 +623,9 @@ func resourceTableCreate(ctx context.Context, d *schema.ResourceData, meta inter } importArn := importTableOutput.(*dynamodb.ImportTableOutput).ImportTableDescription.ImportArn - if _, err = waitImportComplete(ctx, conn, *importArn, d.Timeout(schema.TimeoutCreate)); err != nil { + if _, err = waitImportComplete(ctx, conn, aws.StringValue(importArn), d.Timeout(schema.TimeoutCreate)); err != nil { d.SetId(tableName) - return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionCreating, ResNameTable, d.Id(), err) + return create.AppendDiagError(diags, names.DynamoDB, create.ErrActionCreating, ResNameTable, tableName, err) } } else { input := &dynamodb.CreateTableInput{ From 03a55b2bc80e5eeb77347f7a7a5738f16e9fbfc3 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 12:30:35 -0500 Subject: [PATCH 255/438] r/aws_lb_target_group: Corrections. 
--- internal/service/elbv2/target_group.go | 46 ++++++--------------- internal/service/elbv2/target_group_test.go | 4 -- 2 files changed, 13 insertions(+), 37 deletions(-) diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index d523f6edfd9..dcfdaed20a9 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -773,19 +773,21 @@ func (m targetGroupAttributeMap) expand(d *schema.ResourceData, targetType strin default: switch attributeInfo.tfType { case schema.TypeBool: - v := v.(bool) - apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ - Key: k, - Value: flex.BoolValueToString(v), - }) + if v := v.(bool); v || update { + apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ + Key: k, + Value: flex.BoolValueToString(v), + }) + } case schema.TypeInt: - v := v.(int) - apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ - Key: k, - Value: flex.IntValueToString(v), - }) + if v := v.(int); v > 0 || update { + apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ + Key: k, + Value: flex.IntValueToString(v), + }) + } case schema.TypeString: - if v := v.(string); v != "" { + if v := v.(string); v != "" || update { apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ Key: k, Value: aws.String(v), @@ -793,28 +795,6 @@ func (m targetGroupAttributeMap) expand(d *schema.ResourceData, targetType strin } } } - - switch v, t, k := d.Get(tfAttributeName), attributeInfo.tfType, aws.String(attributeInfo.apiAttributeKey); t { - case schema.TypeBool: - v := v.(bool) - apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ - Key: k, - Value: flex.BoolValueToString(v), - }) - case schema.TypeInt: - v := v.(int) - apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ - Key: k, - Value: flex.IntValueToString(v), - }) - case schema.TypeString: - if v := v.(string); v != "" { - apiObjects = append(apiObjects, &elbv2.TargetGroupAttribute{ - Key: k, - Value: 
aws.String(v), - }) - } - } } return apiObjects diff --git a/internal/service/elbv2/target_group_test.go b/internal/service/elbv2/target_group_test.go index 602b7188380..14ae03e63d0 100644 --- a/internal/service/elbv2/target_group_test.go +++ b/internal/service/elbv2/target_group_test.go @@ -4083,10 +4083,6 @@ func testAccCheckTargetGroupExists(ctx context.Context, n string, v *elbv2.Targe return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return errors.New("No ELBv2 Target Group ID is set") - } - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Conn(ctx) output, err := tfelbv2.FindTargetGroupByARN(ctx, conn, rs.Primary.ID) From b537cde7c6c386d7dbdd81cf5f7fdc66d7b301e9 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 15 Dec 2023 12:35:47 -0500 Subject: [PATCH 256/438] autoflex: Testing ordering --- internal/framework/flex/auto_flatten.go | 11 ----------- internal/framework/flex/auto_flatten_test.go | 20 +------------------- 2 files changed, 1 insertion(+), 30 deletions(-) diff --git a/internal/framework/flex/auto_flatten.go b/internal/framework/flex/auto_flatten.go index e2fe57c538c..fd118d06cb2 100644 --- a/internal/framework/flex/auto_flatten.go +++ b/internal/framework/flex/auto_flatten.go @@ -715,14 +715,8 @@ func (flattener autoFlattener) structMapToObjectList(ctx context.Context, vFrom t := reflect.ValueOf(to) - //tStruct := t.Type().Elem() - //if tStruct.Kind() == reflect.Ptr { - // tStruct = tStruct.Elem() - //} - i := 0 for _, key := range vFrom.MapKeys() { - //target := reflect.New(tStruct) target, d := tTo.NewObjectPtr(ctx) diags.Append(d...) 
if diags.HasError() { @@ -744,11 +738,6 @@ func (flattener autoFlattener) structMapToObjectList(ctx context.Context, vFrom t.Index(i).Set(reflect.ValueOf(target)) i++ - //if t.Type().Elem().Kind() == reflect.Struct { - // t.SetMapIndex(key, target.Elem()) - //} else { - // t.SetMapIndex(key, target) - //} } val, d := tTo.ValueFromObjectSlice(ctx, to) diff --git a/internal/framework/flex/auto_flatten_test.go b/internal/framework/flex/auto_flatten_test.go index f7854197ef1..41702f250e6 100644 --- a/internal/framework/flex/auto_flatten_test.go +++ b/internal/framework/flex/auto_flatten_test.go @@ -757,10 +757,6 @@ func TestFlattenGeneric(t *testing.T) { Attr1: "a", Attr2: "b", }, - "y": { - Attr1: "c", - Attr2: "d", - }, }, }, Target: &TestFlexBlockKeyMapTF01{}, @@ -771,11 +767,6 @@ func TestFlattenGeneric(t *testing.T) { Attr1: types.StringValue("a"), Attr2: types.StringValue("b"), }, - { - TFBlockKeyMap: types.StringValue("y"), - Attr1: types.StringValue("c"), - Attr2: types.StringValue("d"), - }, }), }, }, @@ -787,10 +778,6 @@ func TestFlattenGeneric(t *testing.T) { Attr1: "a", Attr2: "b", }, - "y": { - Attr1: "c", - Attr2: "d", - }, }, }, Target: &TestFlexBlockKeyMapTF01{}, @@ -801,11 +788,6 @@ func TestFlattenGeneric(t *testing.T) { Attr1: types.StringValue("a"), Attr2: types.StringValue("b"), }, - { - TFBlockKeyMap: types.StringValue("y"), - Attr1: types.StringValue("c"), - Attr2: types.StringValue("d"), - }, }), }, }, @@ -904,7 +886,7 @@ func TestFlattenGeneric(t *testing.T) { t.Errorf("gotErr = %v, wantErr = %v", gotErr, testCase.WantErr) } - less := func(a, b any) bool { return fmt.Sprint(a) < fmt.Sprint(b) } + less := func(a, b any) bool { return fmt.Sprintf("%+v", a) < fmt.Sprintf("%+v", b) } if gotErr { if !testCase.WantErr { From d4d4cd5ede4b6e81d3f15a5b7b04f73014a2a91c Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 15 Dec 2023 12:37:32 -0500 Subject: [PATCH 257/438] autoflex: Testing ordering --- internal/framework/flex/auto_flatten_test.go | 9 
--------- 1 file changed, 9 deletions(-) diff --git a/internal/framework/flex/auto_flatten_test.go b/internal/framework/flex/auto_flatten_test.go index 41702f250e6..ed98e88250e 100644 --- a/internal/framework/flex/auto_flatten_test.go +++ b/internal/framework/flex/auto_flatten_test.go @@ -799,10 +799,6 @@ func TestFlattenGeneric(t *testing.T) { Attr1: "a", Attr2: "b", }, - "y": { - Attr1: "c", - Attr2: "d", - }, }, }, Target: &TestFlexBlockKeyMapTF01{}, @@ -813,11 +809,6 @@ func TestFlattenGeneric(t *testing.T) { Attr1: types.StringValue("a"), Attr2: types.StringValue("b"), }, - { - TFBlockKeyMap: types.StringValue("y"), - Attr1: types.StringValue("c"), - Attr2: types.StringValue("d"), - }, }), }, }, From 8638cb636d9f9c9c8f87be4cb1c56798e6ff1490 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 12:45:05 -0500 Subject: [PATCH 258/438] Add 'verify.StringHasPrefix'. --- internal/verify/validate.go | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/internal/verify/validate.go b/internal/verify/validate.go index 57a0d9752e2..cf2390c83cd 100644 --- a/internal/verify/validate.go +++ b/internal/verify/validate.go @@ -460,6 +460,23 @@ func FloatGreaterThan(threshold float64) schema.SchemaValidateFunc { } } +func StringHasPrefix(prefix string) schema.SchemaValidateFunc { + return func(v interface{}, k string) (warnings []string, errors []error) { + s, ok := v.(string) + if !ok { + errors = append(errors, fmt.Errorf("expected type of %s to be string", k)) + return + } + + if !strings.HasPrefix(s, prefix) { + errors = append(errors, fmt.Errorf("expected %s to have prefix %s, got %s", k, prefix, s)) + return + } + + return warnings, errors + } +} + func ValidServicePrincipal(v interface{}, k string) (ws []string, errors []error) { value := v.(string) From 75a91831b713db097bb0ed61a823beb5f03f479f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 15 Dec 2023 13:25:14 -0500 Subject: 
[PATCH 259/438] build(deps): bump the terraform-devex group with 2 updates (#34941) Bumps the terraform-devex group with 2 updates: [github.com/hashicorp/terraform-plugin-mux](https://github.com/hashicorp/terraform-plugin-mux) and [github.com/hashicorp/terraform-plugin-sdk/v2](https://github.com/hashicorp/terraform-plugin-sdk). Updates `github.com/hashicorp/terraform-plugin-mux` from 0.12.0 to 0.13.0 - [Release notes](https://github.com/hashicorp/terraform-plugin-mux/releases) - [Changelog](https://github.com/hashicorp/terraform-plugin-mux/blob/main/CHANGELOG.md) - [Commits](https://github.com/hashicorp/terraform-plugin-mux/compare/v0.12.0...v0.13.0) Updates `github.com/hashicorp/terraform-plugin-sdk/v2` from 2.30.0 to 2.31.0 - [Release notes](https://github.com/hashicorp/terraform-plugin-sdk/releases) - [Changelog](https://github.com/hashicorp/terraform-plugin-sdk/blob/main/CHANGELOG.md) - [Commits](https://github.com/hashicorp/terraform-plugin-sdk/compare/v2.30.0...v2.31.0) --- updated-dependencies: - dependency-name: github.com/hashicorp/terraform-plugin-mux dependency-type: direct:production update-type: version-update:semver-minor dependency-group: terraform-devex - dependency-name: github.com/hashicorp/terraform-plugin-sdk/v2 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: terraform-devex ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 16 ++++++++-------- go.sum | 37 ++++++++++++++++++------------------- 2 files changed, 26 insertions(+), 27 deletions(-) diff --git a/go.mod b/go.mod index 553689a5b38..78a783a30a1 100644 --- a/go.mod +++ b/go.mod @@ -117,10 +117,10 @@ require ( github.com/hashicorp/terraform-plugin-framework v1.4.2 github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 - github.com/hashicorp/terraform-plugin-go v0.19.1 + github.com/hashicorp/terraform-plugin-go v0.20.0 github.com/hashicorp/terraform-plugin-log v0.9.0 - github.com/hashicorp/terraform-plugin-mux v0.12.0 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0 + github.com/hashicorp/terraform-plugin-mux v0.13.0 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0 github.com/hashicorp/terraform-plugin-testing v1.6.0 github.com/jmespath/go-jmespath v0.4.0 github.com/mattbaird/jsonpatch v0.0.0-20230413205102-771768614e91 @@ -176,8 +176,8 @@ require ( github.com/google/uuid v1.3.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect - github.com/hashicorp/go-plugin v1.5.2 // indirect - github.com/hashicorp/hc-install v0.6.1 // indirect + github.com/hashicorp/go-plugin v1.6.0 // indirect + github.com/hashicorp/hc-install v0.6.2 // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.19.0 // indirect github.com/hashicorp/terraform-json v0.18.0 // indirect @@ -206,13 +206,13 @@ require ( go.opentelemetry.io/otel v1.21.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/otel/trace v1.21.0 // indirect - golang.org/x/mod v0.13.0 // indirect + golang.org/x/mod v0.14.0 // indirect golang.org/x/net v0.19.0 // indirect golang.org/x/sys v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect 
google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect - google.golang.org/grpc v1.59.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect + google.golang.org/grpc v1.60.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index 1e073c94d70..dd14e373ab9 100644 --- a/go.sum +++ b/go.sum @@ -13,7 +13,6 @@ github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c h1:kMFnB0vCcX github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/YakDriver/regexache v0.23.0 h1:kv3j4XKhbx/vqUilSBgizXDUXHvvH1KdYekdmGwz4C4= github.com/YakDriver/regexache v0.23.0/go.mod h1:K4BZ3MYKAqSFbYWqmbsG+OzYUDyJjnMEr27DJEsVG3U= -github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= @@ -274,7 +273,7 @@ github.com/gertd/go-pluralize v0.2.1 h1:M3uASbVjMnTsPb0PNqg+E/24Vwigyo/tvyMTtAlL github.com/gertd/go-pluralize v0.2.1/go.mod h1:rbYaKDbsXxmRfr8uygAEKhOWsjyrrqrkHVpZvoOp8zk= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= -github.com/go-git/go-git/v5 v5.9.0 h1:cD9SFA7sHVRdJ7AYck1ZaAa/yeuBvGPxwXDL8cxrObY= +github.com/go-git/go-git/v5 v5.10.1 h1:tu8/D8i+TWxgKpzQ3Vc43e+kkhXqtsZCKI/egajKnxk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 
h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -320,15 +319,15 @@ github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVH github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.5.2 h1:aWv8eimFqWlsEiMrYZdPYl+FdHaBJSN4AWwGWfT1G2Y= -github.com/hashicorp/go-plugin v1.5.2/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= +github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= +github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.6.1 h1:IGxShH7AVhPaSuSJpKtVi/EFORNjO+OYVJJrAtGG2mY= -github.com/hashicorp/hc-install v0.6.1/go.mod h1:0fW3jpg+wraYSnFDJ6Rlie3RvLf1bIqVIkzoon4KoVE= +github.com/hashicorp/hc-install v0.6.2 h1:V1k+Vraqz4olgZ9UzKiAcbman9i9scg9GgSt/U3mw/M= +github.com/hashicorp/hc-install v0.6.2/go.mod h1:2JBpd+NCFKiHiu/yYCGaPyPHhZLxXTpz8oreHa/a3Ps= github.com/hashicorp/hcl/v2 v2.19.1 h1://i05Jqznmb2EXqa39Nsvyan2o5XyMowW5fnCKW5RPI= github.com/hashicorp/hcl/v2 v2.19.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= @@ -343,12 +342,12 @@ 
github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 h1:gm5b1kHgFFhaK github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1/go.mod h1:MsjL1sQ9L7wGwzJ5RjcI6FzEMdyoBnw+XK8ZnOvQOLY= github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 h1:HOjBuMbOEzl7snOdOoUfE2Jgeto6JOjLVQ39Ls2nksc= github.com/hashicorp/terraform-plugin-framework-validators v0.12.0/go.mod h1:jfHGE/gzjxYz6XoUwi/aYiiKrJDeutQNUtGQXkaHklg= -github.com/hashicorp/terraform-plugin-go v0.19.1 h1:lf/jTGTeELcz5IIbn/94mJdmnTjRYm6S6ct/JqCSr50= -github.com/hashicorp/terraform-plugin-go v0.19.1/go.mod h1:5NMIS+DXkfacX6o5HCpswda5yjkSYfKzn1Nfl9l+qRs= -github.com/hashicorp/terraform-plugin-mux v0.12.0 h1:TJlmeslQ11WlQtIFAfth0vXx+gSNgvMEng2Rn9z3WZY= -github.com/hashicorp/terraform-plugin-mux v0.12.0/go.mod h1:8MR0AgmV+Q03DIjyrAKxXyYlq2EUnYBQP8gxAAA0zeM= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0 h1:X7vB6vn5tON2b49ILa4W7mFAsndeqJ7bZFOGbVO+0Cc= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0/go.mod h1:ydFcxbdj6klCqYEPkPvdvFKiNGKZLUs+896ODUXCyao= +github.com/hashicorp/terraform-plugin-go v0.20.0 h1:oqvoUlL+2EUbKNsJbIt3zqqZ7wi6lzn4ufkn/UA51xQ= +github.com/hashicorp/terraform-plugin-go v0.20.0/go.mod h1:Rr8LBdMlY53a3Z/HpP+ZU3/xCDqtKNCkeI9qOyT10QE= +github.com/hashicorp/terraform-plugin-mux v0.13.0 h1:79U401/3nd8CWwDGtTHc8F3miSCAS9XGtVarxSTDgwA= +github.com/hashicorp/terraform-plugin-mux v0.13.0/go.mod h1:Ndv0FtwDG2ogzH59y64f2NYimFJ6I0smRgFUKfm6dyQ= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0 h1:Bl3e2ei2j/Z3Hc2HIS15Gal2KMKyLAZ2om1HCEvK6es= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0/go.mod h1:i2C41tszDjiWfziPQDL5R/f3Zp0gahXe5No/MIO9rCE= github.com/hashicorp/terraform-plugin-testing v1.6.0 h1:Wsnfh+7XSVRfwcr2jZYHsnLOnZl7UeaOBvsx6dl/608= github.com/hashicorp/terraform-plugin-testing v1.6.0/go.mod h1:cJGG0/8j9XhHaJZRC+0sXFI4uzqQZ9Az4vh6C4GJpFE= github.com/hashicorp/terraform-registry-address v0.2.3 h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI= @@ -432,7 
+431,7 @@ github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NF github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJCpM= +github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -483,8 +482,8 @@ golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -544,10 +543,10 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= 
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= +google.golang.org/grpc v1.60.0 h1:6FQAR0kM31P6MRdeluor2w2gPaS4SVNrD/DNTxrQ15k= +google.golang.org/grpc v1.60.0/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= From ddfc15e04f86581580957640d8042d96b28aeea6 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Fri, 15 Dec 2023 18:27:32 +0000 Subject: [PATCH 260/438] Update CHANGELOG.md for #34941 --- CHANGELOG.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8a04d9dd4c9..4c9d48d4ff7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,4 +1,9 @@ ## 5.32.0 (Unreleased) + +ENHANCEMENTS: + +* data-source/aws_ecr_image: Add `image_uri` attribute ([#24526](https://github.com/hashicorp/terraform-provider-aws/issues/24526)) + ## 5.31.0 (December 15, 2023) FEATURES: From fc5f6a0d81aa8c50e980395e84605787be740d34 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 
13:41:36 -0500 Subject: [PATCH 261/438] r/aws_lb_target_group: Tidy up 'flattenTargetGroupHealthCheck'. --- internal/service/elbv2/load_balancer.go | 12 +- internal/service/elbv2/target_group.go | 309 +++++++++--------- .../service/elbv2/target_group_data_source.go | 62 +--- 3 files changed, 159 insertions(+), 224 deletions(-) diff --git a/internal/service/elbv2/load_balancer.go b/internal/service/elbv2/load_balancer.go index 849b31031a7..45d7f76ba43 100644 --- a/internal/service/elbv2/load_balancer.go +++ b/internal/service/elbv2/load_balancer.go @@ -51,9 +51,9 @@ func ResourceLoadBalancer() *schema.Resource { }, CustomizeDiff: customdiff.Sequence( - customizeDiffALB, - customizeDiffNLB, - customizeDiffGWLB, + customizeDiffLoadBalancerALB, + customizeDiffLoadBalancerNLB, + customizeDiffLoadBalancerGWLB, verify.SetTagsDiff, ), @@ -1062,7 +1062,7 @@ func SuffixFromARN(arn *string) string { // cannot have security groups added if none are present, and cannot have // all security groups removed. If the type is 'network' and any of these // conditions are met, mark the diff as a ForceNew operation. 
-func customizeDiffNLB(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { +func customizeDiffLoadBalancerNLB(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { // The current criteria for determining if the operation should be ForceNew: // - lb of type "network" // - existing resource (id is not "") @@ -1152,7 +1152,7 @@ func customizeDiffNLB(_ context.Context, diff *schema.ResourceDiff, v interface{ return nil } -func customizeDiffALB(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { +func customizeDiffLoadBalancerALB(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { if lbType := diff.Get("load_balancer_type").(string); lbType != elbv2.LoadBalancerTypeEnumApplication { return nil } @@ -1208,7 +1208,7 @@ func customizeDiffALB(_ context.Context, diff *schema.ResourceDiff, v interface{ return nil } -func customizeDiffGWLB(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { +func customizeDiffLoadBalancerGWLB(_ context.Context, diff *schema.ResourceDiff, v interface{}) error { if lbType := diff.Get("load_balancer_type").(string); lbType != elbv2.LoadBalancerTypeEnumGateway { return nil } diff --git a/internal/service/elbv2/target_group.go b/internal/service/elbv2/target_group.go index dcfdaed20a9..0a5007286e8 100644 --- a/internal/service/elbv2/target_group.go +++ b/internal/service/elbv2/target_group.go @@ -50,8 +50,8 @@ func ResourceTargetGroup() *schema.Resource { CustomizeDiff: customdiff.Sequence( resourceTargetGroupCustomizeDiff, - lambdaTargetHealthCheckProtocolCustomizeDiff, - nonLambdaValidationCustomizeDiff, + customizeDiffTargetGroupTargetTypeLambda, + customizeDiffTargetGroupTargetTypeNotLambda, verify.SetTagsDiff, ), @@ -110,7 +110,7 @@ func ResourceTargetGroup() *schema.Resource { Computed: true, ValidateFunc: validation.All( validation.StringLenBetween(1, 1024), - validTargetGroupHealthCheckPath, + verify.StringHasPrefix("/"), ), }, "port": { @@ -236,10 +236,13 @@ 
func ResourceTargetGroup() *schema.Resource { Default: false, }, "slow_start": { - Type: schema.TypeInt, - Optional: true, - Default: 0, - ValidateFunc: validateSlowStart, + Type: schema.TypeInt, + Optional: true, + Default: 0, + ValidateFunc: validation.Any( + validation.IntBetween(0, 0), + validation.IntBetween(30, 900), + ), }, "stickiness": { Type: schema.TypeList, @@ -523,16 +526,15 @@ func resourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta i d.Set("arn", targetGroup.TargetGroupArn) d.Set("arn_suffix", TargetGroupSuffixFromARN(targetGroup.TargetGroupArn)) + if err := d.Set("health_check", flattenTargetGroupHealthCheck(targetGroup)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting health_check: %s", err) + } d.Set("ip_address_type", targetGroup.IpAddressType) d.Set("name", targetGroup.TargetGroupName) d.Set("name_prefix", create.NamePrefixFromName(aws.StringValue(targetGroup.TargetGroupName))) targetType := aws.StringValue(targetGroup.TargetType) d.Set("target_type", targetType) - if err := d.Set("health_check", flattenLbTargetGroupHealthCheck(targetGroup)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting health_check: %s", err) - } - if _, ok := d.GetOk("port"); targetGroup.Port != nil || ok { d.Set("port", targetGroup.Port) } @@ -934,28 +936,6 @@ func findTargetGroupAttributesByARN(ctx context.Context, conn *elbv2.ELBV2, arn return output.Attributes, nil } -func validTargetGroupHealthCheckPath(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - if !strings.HasPrefix(value, "/") { - errors = append(errors, fmt.Errorf( - "%q must begin with a '/' character, got %q", k, value)) - } - return -} - -func validateSlowStart(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - - // Check if the value is between 30-900 or 0 (seconds). 
- if value != 0 && !(value >= 30 && value <= 900) { - errors = append(errors, fmt.Errorf( - "%q contains an invalid Slow Start Duration \"%d\". "+ - "Valid intervals are 30-900 or 0 to disable.", - k, value)) - } - return -} - func validTargetGroupHealthCheckPort(v interface{}, k string) (ws []string, errors []error) { value := v.(string) @@ -989,6 +969,139 @@ func TargetGroupSuffixFromARN(arn *string) string { return "" } +func resourceTargetGroupCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta any) error { + healthCheck := make(map[string]any) + if healthChecks := diff.Get("health_check").([]interface{}); len(healthChecks) == 1 { + healthCheck = healthChecks[0].(map[string]interface{}) + } + + if p, ok := healthCheck["protocol"].(string); ok && strings.ToUpper(p) == elbv2.ProtocolEnumTcp { + if m := healthCheck["matcher"].(string); m != "" { + return fmt.Errorf("Attribute %q cannot be specified when %q is %q.", + "health_check.matcher", + "health_check.protocol", + elbv2.ProtocolEnumTcp, + ) + } + + if m := healthCheck["path"].(string); m != "" { + return fmt.Errorf("Attribute %q cannot be specified when %q is %q.", + "health_check.path", + "health_check.protocol", + elbv2.ProtocolEnumTcp, + ) + } + } + + protocol := diff.Get("protocol").(string) + + switch protocol { + case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: + if p, ok := healthCheck["protocol"].(string); ok && strings.ToUpper(p) == elbv2.ProtocolEnumTcp { + return fmt.Errorf("Attribute %q cannot have value %q when %q is %q.", + "health_check.protocol", + elbv2.ProtocolEnumTcp, + "protocol", + protocol, + ) + } + } + + if diff.Id() == "" { + return nil + } + + return nil +} + +func customizeDiffTargetGroupTargetTypeLambda(_ context.Context, diff *schema.ResourceDiff, meta any) error { + if diff.Get("target_type").(string) != elbv2.TargetTypeEnumLambda { + return nil + } + + if healthChecks := diff.Get("health_check").([]interface{}); len(healthChecks) == 1 { + healthCheck := 
healthChecks[0].(map[string]interface{}) + healthCheckProtocol := healthCheck["protocol"].(string) + + if healthCheckProtocol == elbv2.ProtocolEnumTcp { + return fmt.Errorf("Attribute %q cannot have value %q when %q is %q.", + "health_check.protocol", + elbv2.ProtocolEnumTcp, + "target_type", + elbv2.TargetTypeEnumLambda, + ) + } + } + + return nil +} + +func customizeDiffTargetGroupTargetTypeNotLambda(_ context.Context, diff *schema.ResourceDiff, meta any) error { + targetType := diff.Get("target_type").(string) + if targetType == elbv2.TargetTypeEnumLambda { + return nil + } + + config := diff.GetRawConfig() + + if v := config.GetAttr("port"); v.IsKnown() && v.IsNull() { + return fmt.Errorf("Attribute %q must be specified when %q is %q.", + "port", + "target_type", + targetType, + ) + } + + if v := config.GetAttr("protocol"); v.IsKnown() && v.IsNull() { + return fmt.Errorf("Attribute %q must be specified when %q is %q.", + "protocol", + "target_type", + targetType, + ) + } + + if v := config.GetAttr("vpc_id"); v.IsKnown() && v.IsNull() { + return fmt.Errorf("Attribute %q must be specified when %q is %q.", + "vpc_id", + "target_type", + targetType, + ) + } + + return nil +} + +func flattenTargetGroupHealthCheck(apiObject *elbv2.TargetGroup) []interface{} { + if apiObject == nil { + return []interface{}{} + } + + tfMap := map[string]interface{}{ + "enabled": aws.BoolValue(apiObject.HealthCheckEnabled), + "healthy_threshold": int(aws.Int64Value(apiObject.HealthyThresholdCount)), + "interval": int(aws.Int64Value(apiObject.HealthCheckIntervalSeconds)), + "port": aws.StringValue(apiObject.HealthCheckPort), + "protocol": aws.StringValue(apiObject.HealthCheckProtocol), + "timeout": int(aws.Int64Value(apiObject.HealthCheckTimeoutSeconds)), + "unhealthy_threshold": int(aws.Int64Value(apiObject.UnhealthyThresholdCount)), + } + + if v := apiObject.HealthCheckPath; v != nil { + tfMap["path"] = aws.StringValue(v) + } + + if apiObject := apiObject.Matcher; apiObject != nil { + 
if v := apiObject.HttpCode; v != nil { + tfMap["matcher"] = aws.StringValue(v) + } + if v := apiObject.GrpcCode; v != nil { + tfMap["matcher"] = aws.StringValue(v) + } + } + + return []interface{}{tfMap} +} + func expandTargetGroupStickinessAttributes(tfMap map[string]interface{}, protocol string) []*elbv2.TargetGroupAttribute { if tfMap == nil { return nil @@ -1150,136 +1263,6 @@ func flattenTargetGroupTargetHealthStateAttributes(apiObjects []*elbv2.TargetGro return tfMap } -func resourceTargetGroupCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta any) error { - healthCheck := make(map[string]any) - if healthChecks := diff.Get("health_check").([]interface{}); len(healthChecks) == 1 { - healthCheck = healthChecks[0].(map[string]interface{}) - } - - if p, ok := healthCheck["protocol"].(string); ok && strings.ToUpper(p) == elbv2.ProtocolEnumTcp { - if m := healthCheck["matcher"].(string); m != "" { - return fmt.Errorf("Attribute %q cannot be specified when %q is %q.", - "health_check.matcher", - "health_check.protocol", - elbv2.ProtocolEnumTcp, - ) - } - - if m := healthCheck["path"].(string); m != "" { - return fmt.Errorf("Attribute %q cannot be specified when %q is %q.", - "health_check.path", - "health_check.protocol", - elbv2.ProtocolEnumTcp, - ) - } - } - - protocol := diff.Get("protocol").(string) - - switch protocol { - case elbv2.ProtocolEnumHttp, elbv2.ProtocolEnumHttps: - if p, ok := healthCheck["protocol"].(string); ok && strings.ToUpper(p) == elbv2.ProtocolEnumTcp { - return fmt.Errorf("Attribute %q cannot have value %q when %q is %q.", - "health_check.protocol", - elbv2.ProtocolEnumTcp, - "protocol", - protocol, - ) - } - } - - if diff.Id() == "" { - return nil - } - - return nil -} - -func lambdaTargetHealthCheckProtocolCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta any) error { - if diff.Get("target_type").(string) != elbv2.TargetTypeEnumLambda { - return nil - } - - if healthChecks := 
diff.Get("health_check").([]interface{}); len(healthChecks) == 1 { - healthCheck := healthChecks[0].(map[string]interface{}) - healthCheckProtocol := healthCheck["protocol"].(string) - - if healthCheckProtocol == elbv2.ProtocolEnumTcp { - return fmt.Errorf("Attribute %q cannot have value %q when %q is %q.", - "health_check.protocol", - elbv2.ProtocolEnumTcp, - "target_type", - elbv2.TargetTypeEnumLambda, - ) - } - } - - return nil -} - -func nonLambdaValidationCustomizeDiff(_ context.Context, diff *schema.ResourceDiff, meta any) error { - targetType := diff.Get("target_type").(string) - if targetType == elbv2.TargetTypeEnumLambda { - return nil - } - - config := diff.GetRawConfig() - - if v := config.GetAttr("port"); v.IsKnown() && v.IsNull() { - return fmt.Errorf("Attribute %q must be specified when %q is %q.", - "port", - "target_type", - targetType, - ) - } - - if v := config.GetAttr("protocol"); v.IsKnown() && v.IsNull() { - return fmt.Errorf("Attribute %q must be specified when %q is %q.", - "protocol", - "target_type", - targetType, - ) - } - - if v := config.GetAttr("vpc_id"); v.IsKnown() && v.IsNull() { - return fmt.Errorf("Attribute %q must be specified when %q is %q.", - "vpc_id", - "target_type", - targetType, - ) - } - - return nil -} - -func flattenLbTargetGroupHealthCheck(targetGroup *elbv2.TargetGroup) []interface{} { - if targetGroup == nil { - return []interface{}{} - } - - m := map[string]interface{}{ - "enabled": aws.BoolValue(targetGroup.HealthCheckEnabled), - "healthy_threshold": int(aws.Int64Value(targetGroup.HealthyThresholdCount)), - "interval": int(aws.Int64Value(targetGroup.HealthCheckIntervalSeconds)), - "port": aws.StringValue(targetGroup.HealthCheckPort), - "protocol": aws.StringValue(targetGroup.HealthCheckProtocol), - "timeout": int(aws.Int64Value(targetGroup.HealthCheckTimeoutSeconds)), - "unhealthy_threshold": int(aws.Int64Value(targetGroup.UnhealthyThresholdCount)), - } - - if targetGroup.HealthCheckPath != nil { - m["path"] = 
aws.StringValue(targetGroup.HealthCheckPath) - } - if targetGroup.Matcher != nil && targetGroup.Matcher.HttpCode != nil { - m["matcher"] = aws.StringValue(targetGroup.Matcher.HttpCode) - } - if targetGroup.Matcher != nil && targetGroup.Matcher.GrpcCode != nil { - m["matcher"] = aws.StringValue(targetGroup.Matcher.GrpcCode) - } - - return []interface{}{m} -} - func pathString(path cty.Path) string { var buf strings.Builder for i, step := range path { diff --git a/internal/service/elbv2/target_group_data_source.go b/internal/service/elbv2/target_group_data_source.go index 85b4dd1b326..86b78ca2ebb 100644 --- a/internal/service/elbv2/target_group_data_source.go +++ b/internal/service/elbv2/target_group_data_source.go @@ -6,7 +6,6 @@ package elbv2 import ( "context" "log" - "strconv" "time" "github.com/aws/aws-sdk-go/aws" @@ -220,20 +219,18 @@ func dataSourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta } targetGroup := results[0] - d.SetId(aws.StringValue(targetGroup.TargetGroupArn)) - d.Set("arn", targetGroup.TargetGroupArn) d.Set("arn_suffix", TargetGroupSuffixFromARN(targetGroup.TargetGroupArn)) - d.Set("name", targetGroup.TargetGroupName) - d.Set("target_type", targetGroup.TargetType) - - if err := d.Set("health_check", flattenLbTargetGroupHealthCheck(targetGroup)); err != nil { + if err := d.Set("health_check", flattenTargetGroupHealthCheck(targetGroup)); err != nil { return sdkdiag.AppendErrorf(diags, "setting health_check: %s", err) } + d.Set("name", targetGroup.TargetGroupName) + targetType := aws.StringValue(targetGroup.TargetType) + d.Set("target_type", targetType) var protocol string - if v, _ := d.Get("target_type").(string); v != elbv2.TargetTypeEnumLambda { + if targetType != elbv2.TargetTypeEnumLambda { d.Set("port", targetGroup.Port) protocol = aws.StringValue(targetGroup.Protocol) d.Set("protocol", protocol) @@ -250,57 +247,12 @@ func dataSourceTargetGroupRead(ctx context.Context, d *schema.ResourceData, meta return 
sdkdiag.AppendErrorf(diags, "reading ELBv2 Target Group (%s) attributes: %s", d.Id(), err) } - for _, attr := range attributes { - switch aws.StringValue(attr.Key) { - case "deregistration_delay.connection_termination.enabled": - enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "converting deregistration_delay.connection_termination.enabled to bool: %s", aws.StringValue(attr.Value)) - } - d.Set("connection_termination", enabled) - case "deregistration_delay.timeout_seconds": - timeout, err := strconv.Atoi(aws.StringValue(attr.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "converting deregistration_delay.timeout_seconds to int: %s", aws.StringValue(attr.Value)) - } - d.Set("deregistration_delay", timeout) - case "lambda.multi_value_headers.enabled": - enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "converting lambda.multi_value_headers.enabled to bool: %s", aws.StringValue(attr.Value)) - } - d.Set("lambda_multi_value_headers_enabled", enabled) - case "proxy_protocol_v2.enabled": - enabled, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "converting proxy_protocol_v2.enabled to bool: %s", aws.StringValue(attr.Value)) - } - d.Set("proxy_protocol_v2", enabled) - case "slow_start.duration_seconds": - slowStart, err := strconv.Atoi(aws.StringValue(attr.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "converting slow_start.duration_seconds to int: %s", aws.StringValue(attr.Value)) - } - d.Set("slow_start", slowStart) - case "load_balancing.algorithm.type": - loadBalancingAlgorithm := aws.StringValue(attr.Value) - d.Set("load_balancing_algorithm_type", loadBalancingAlgorithm) - case "load_balancing.cross_zone.enabled": - loadBalancingCrossZoneEnabled := aws.StringValue(attr.Value) - d.Set("load_balancing_cross_zone_enabled", 
loadBalancingCrossZoneEnabled) - case "preserve_client_ip.enabled": - _, err := strconv.ParseBool(aws.StringValue(attr.Value)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "converting preserve_client_ip.enabled to bool: %s", aws.StringValue(attr.Value)) - } - d.Set("preserve_client_ip", attr.Value) - } - } - if err := d.Set("stickiness", []interface{}{flattenTargetGroupStickinessAttributes(attributes, protocol)}); err != nil { return sdkdiag.AppendErrorf(diags, "setting stickiness: %s", err) } + targetGroupAttributes.flatten(d, targetType, attributes) + tags, err := listTags(ctx, conn, d.Id()) if errs.IsUnsupportedOperationInPartitionError(conn.PartitionID, err) { From fddd2056de203818eb9c56d1282cafde07bd24fe Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 13:53:23 -0500 Subject: [PATCH 262/438] d/aws_lb_target_group: Change `deregistration_delay` from `TypeInt` to `TypeString`. --- .changelog/31436.txt | 4 ++++ internal/service/elbv2/target_group_data_source.go | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.changelog/31436.txt b/.changelog/31436.txt index cd5e71e36c0..f9bc005b5ff 100644 --- a/.changelog/31436.txt +++ b/.changelog/31436.txt @@ -1,3 +1,7 @@ ```release-note:bug resource/aws_lb_target_group: Fix diff on `stickiness.cookie_name` when `stickiness.type` is `lb_cookie` +``` + +```release-note:bug +data-source/aws_lb_target_group: Change `deregistration_delay` from `TypeInt` to `TypeString` ``` \ No newline at end of file diff --git a/internal/service/elbv2/target_group_data_source.go b/internal/service/elbv2/target_group_data_source.go index 86b78ca2ebb..f4fc4b88208 100644 --- a/internal/service/elbv2/target_group_data_source.go +++ b/internal/service/elbv2/target_group_data_source.go @@ -44,7 +44,7 @@ func DataSourceTargetGroup() *schema.Resource { Computed: true, }, "deregistration_delay": { - Type: schema.TypeInt, + Type: schema.TypeString, Computed: true, }, "health_check": { From 
cd2603a848baf0e1d9a65e182aa07683f4c222e5 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 15 Dec 2023 14:12:43 -0500 Subject: [PATCH 263/438] autoflex: Allow set block map keys --- internal/framework/flex/auto_expand_test.go | 30 +++++++++ internal/framework/flex/auto_flatten.go | 69 ++++++++++++++++++++ internal/framework/flex/auto_flatten_test.go | 21 ++++++ internal/framework/flex/autoflex_test.go | 4 ++ 4 files changed, 124 insertions(+) diff --git a/internal/framework/flex/auto_expand_test.go b/internal/framework/flex/auto_expand_test.go index 4781197f00f..b001c0ea557 100644 --- a/internal/framework/flex/auto_expand_test.go +++ b/internal/framework/flex/auto_expand_test.go @@ -605,6 +605,36 @@ func TestExpandGeneric(t *testing.T) { }, }, }, + { + TestName: "block set key map", + Source: &TestFlexBlockKeyMapTF03{ + BlockMap: fwtypes.NewSetNestedObjectValueOfValueSlice[TestFlexBlockKeyMapTF02](ctx, []TestFlexBlockKeyMapTF02{ + { + TFBlockKeyMap: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + { + TFBlockKeyMap: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), + }, + }), + }, + Target: &TestFlexBlockKeyMapAWS01{}, + WantTarget: &TestFlexBlockKeyMapAWS01{ + BlockMap: map[string]TestFlexBlockKeyMapAWS02{ + "x": { + Attr1: "a", + Attr2: "b", + }, + "y": { + Attr1: "c", + Attr2: "d", + }, + }, + }, + }, { TestName: "block key map ptr source", Source: &TestFlexBlockKeyMapTF01{ diff --git a/internal/framework/flex/auto_flatten.go b/internal/framework/flex/auto_flatten.go index fd118d06cb2..3b0723becdf 100644 --- a/internal/framework/flex/auto_flatten.go +++ b/internal/framework/flex/auto_flatten.go @@ -508,6 +508,14 @@ func (flattener autoFlattener) map_(ctx context.Context, vFrom reflect.Value, tT switch tMapElem := vFrom.Type().Elem(); tMapElem.Kind() { case reflect.Struct: switch tTo := tTo.(type) { + case basetypes.SetTypable: + // + // map[string]struct -> 
fwtypes.ListNestedObjectOf[Object] + // + if tTo, ok := tTo.(fwtypes.NestedObjectType); ok { + diags.Append(flattener.structMapToObjectList(ctx, vFrom, tTo, vTo)...) + return diags + } case basetypes.ListTypable: // // map[string]struct -> fwtypes.ListNestedObjectOf[Object] @@ -751,6 +759,67 @@ func (flattener autoFlattener) structMapToObjectList(ctx context.Context, vFrom return diags } +/* +func (flattener autoFlattener) structMapToObjectSet(ctx context.Context, vFrom reflect.Value, tTo fwtypes.NestedObjectType, vTo reflect.Value) diag.Diagnostics { + var diags diag.Diagnostics + + if vFrom.IsNil() { + val, d := tTo.NullValue(ctx) + diags.Append(d...) + if diags.HasError() { + return diags + } + + vTo.Set(reflect.ValueOf(val)) + return diags + } + + n := vFrom.Len() + to, d := tTo.NewObjectSlice(ctx, n, n) + diags.Append(d...) + if diags.HasError() { + return diags + } + + t := reflect.ValueOf(to) + + i := 0 + for _, key := range vFrom.MapKeys() { + target, d := tTo.NewObjectPtr(ctx) + diags.Append(d...) + if diags.HasError() { + return diags + } + + fromInterface := vFrom.MapIndex(key).Interface() + if vFrom.MapIndex(key).Kind() == reflect.Ptr { + fromInterface = vFrom.MapIndex(key).Elem().Interface() + } + + diags.Append(autoFlexConvertStruct(ctx, fromInterface, target, flattener)...) + if diags.HasError() { + return diags + } + + d = blockKeyMapSet(target, key.String()) + diags.Append(d...) + + t.Index(i).Set(reflect.ValueOf(target)) + i++ + } + + val, d := tTo.ValueFromObjectSlice(ctx, to) + diags.Append(d...) + if diags.HasError() { + return diags + } + + vTo.Set(reflect.ValueOf(val)) + + return diags +} +*/ + // structToNestedObject copies an AWS API struct value to a compatible Plugin Framework NestedObjectValue value. 
func (flattener autoFlattener) structToNestedObject(ctx context.Context, vFrom reflect.Value, isNullFrom bool, tTo fwtypes.NestedObjectType, vTo reflect.Value) diag.Diagnostics { var diags diag.Diagnostics diff --git a/internal/framework/flex/auto_flatten_test.go b/internal/framework/flex/auto_flatten_test.go index ed98e88250e..fbd6da59844 100644 --- a/internal/framework/flex/auto_flatten_test.go +++ b/internal/framework/flex/auto_flatten_test.go @@ -770,6 +770,27 @@ func TestFlattenGeneric(t *testing.T) { }), }, }, + { + TestName: "block key set map", + Source: &TestFlexBlockKeyMapAWS01{ + BlockMap: map[string]TestFlexBlockKeyMapAWS02{ + "x": { + Attr1: "a", + Attr2: "b", + }, + }, + }, + Target: &TestFlexBlockKeyMapTF03{}, + WantTarget: &TestFlexBlockKeyMapTF03{ + BlockMap: fwtypes.NewSetNestedObjectValueOfValueSlice[TestFlexBlockKeyMapTF02](ctx, []TestFlexBlockKeyMapTF02{ + { + TFBlockKeyMap: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + }), + }, + }, { TestName: "block key map ptr source", Source: &TestFlexBlockKeyMapAWS03{ diff --git a/internal/framework/flex/autoflex_test.go b/internal/framework/flex/autoflex_test.go index 491874db027..6ae32a5ed8d 100644 --- a/internal/framework/flex/autoflex_test.go +++ b/internal/framework/flex/autoflex_test.go @@ -308,6 +308,10 @@ type TestFlexBlockKeyMapTF02 struct { Attr2 types.String `tfsdk:"attr2"` } +type TestFlexBlockKeyMapTF03 struct { + BlockMap fwtypes.SetNestedObjectValueOf[TestFlexBlockKeyMapTF02] `tfsdk:"block_map"` +} + type TestFlexBlockKeyMapAWS01 struct { BlockMap map[string]TestFlexBlockKeyMapAWS02 } From 80904fe84382d3ee3dcc25ab6b5505b2ac4b96d4 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Fri, 15 Dec 2023 14:54:38 -0500 Subject: [PATCH 264/438] autoflex: Rename map --- internal/framework/flex/auto_expand.go | 4 +- internal/framework/flex/auto_expand_test.go | 96 ++++++++++---------- internal/framework/flex/auto_flatten.go | 4 +- 
internal/framework/flex/auto_flatten_test.go | 72 +++++++-------- internal/framework/flex/autoflex.go | 4 +- internal/framework/flex/autoflex_test.go | 26 +++--- 6 files changed, 103 insertions(+), 103 deletions(-) diff --git a/internal/framework/flex/auto_expand.go b/internal/framework/flex/auto_expand.go index cfb464dacaf..ad2f799470e 100644 --- a/internal/framework/flex/auto_expand.go +++ b/internal/framework/flex/auto_expand.go @@ -762,7 +762,7 @@ func blockKeyMap(from any) (reflect.Value, diag.Diagnostics) { } // go from StringValue to string - if field.Name == BlockKeyMap { + if field.Name == MapBlockKey { if v, ok := valFrom.Field(i).Interface().(basetypes.StringValue); ok { return reflect.ValueOf(v.ValueString()), diags } @@ -770,7 +770,7 @@ func blockKeyMap(from any) (reflect.Value, diag.Diagnostics) { } } - diags.AddError("AutoFlEx", fmt.Sprintf("unable to find map block key (%s)", BlockKeyMap)) + diags.AddError("AutoFlEx", fmt.Sprintf("unable to find map block key (%s)", MapBlockKey)) return reflect.Zero(reflect.TypeOf("")), diags } diff --git a/internal/framework/flex/auto_expand_test.go b/internal/framework/flex/auto_expand_test.go index b001c0ea557..b6ae8721391 100644 --- a/internal/framework/flex/auto_expand_test.go +++ b/internal/framework/flex/auto_expand_test.go @@ -576,24 +576,24 @@ func TestExpandGeneric(t *testing.T) { }, }, { - TestName: "block key map", - Source: &TestFlexBlockKeyMapTF01{ - BlockMap: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexBlockKeyMapTF02](ctx, []TestFlexBlockKeyMapTF02{ + TestName: "map block key list", + Source: &TestFlexMapBlockKeyTF01{ + BlockMap: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexMapBlockKeyTF02](ctx, []TestFlexMapBlockKeyTF02{ { - TFBlockKeyMap: types.StringValue("x"), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), + MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), }, { - TFBlockKeyMap: types.StringValue("y"), - 
Attr1: types.StringValue("c"), - Attr2: types.StringValue("d"), + MapBlockKey: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), }, }), }, - Target: &TestFlexBlockKeyMapAWS01{}, - WantTarget: &TestFlexBlockKeyMapAWS01{ - BlockMap: map[string]TestFlexBlockKeyMapAWS02{ + Target: &TestFlexMapBlockKeyAWS01{}, + WantTarget: &TestFlexMapBlockKeyAWS01{ + BlockMap: map[string]TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", @@ -606,24 +606,24 @@ func TestExpandGeneric(t *testing.T) { }, }, { - TestName: "block set key map", - Source: &TestFlexBlockKeyMapTF03{ - BlockMap: fwtypes.NewSetNestedObjectValueOfValueSlice[TestFlexBlockKeyMapTF02](ctx, []TestFlexBlockKeyMapTF02{ + TestName: "map block key set", + Source: &TestFlexMapBlockKeyTF03{ + BlockMap: fwtypes.NewSetNestedObjectValueOfValueSlice[TestFlexMapBlockKeyTF02](ctx, []TestFlexMapBlockKeyTF02{ { - TFBlockKeyMap: types.StringValue("x"), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), + MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), }, { - TFBlockKeyMap: types.StringValue("y"), - Attr1: types.StringValue("c"), - Attr2: types.StringValue("d"), + MapBlockKey: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), }, }), }, - Target: &TestFlexBlockKeyMapAWS01{}, - WantTarget: &TestFlexBlockKeyMapAWS01{ - BlockMap: map[string]TestFlexBlockKeyMapAWS02{ + Target: &TestFlexMapBlockKeyAWS01{}, + WantTarget: &TestFlexMapBlockKeyAWS01{ + BlockMap: map[string]TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", @@ -636,24 +636,24 @@ func TestExpandGeneric(t *testing.T) { }, }, { - TestName: "block key map ptr source", - Source: &TestFlexBlockKeyMapTF01{ - BlockMap: fwtypes.NewListNestedObjectValueOfSlice(ctx, []*TestFlexBlockKeyMapTF02{ + TestName: "map block key ptr source", + Source: &TestFlexMapBlockKeyTF01{ + BlockMap: fwtypes.NewListNestedObjectValueOfSlice(ctx, 
[]*TestFlexMapBlockKeyTF02{ { - TFBlockKeyMap: types.StringValue("x"), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), + MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), }, { - TFBlockKeyMap: types.StringValue("y"), - Attr1: types.StringValue("c"), - Attr2: types.StringValue("d"), + MapBlockKey: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), }, }), }, - Target: &TestFlexBlockKeyMapAWS01{}, - WantTarget: &TestFlexBlockKeyMapAWS01{ - BlockMap: map[string]TestFlexBlockKeyMapAWS02{ + Target: &TestFlexMapBlockKeyAWS01{}, + WantTarget: &TestFlexMapBlockKeyAWS01{ + BlockMap: map[string]TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", @@ -666,24 +666,24 @@ func TestExpandGeneric(t *testing.T) { }, }, { - TestName: "block key map ptr both", - Source: &TestFlexBlockKeyMapTF01{ - BlockMap: fwtypes.NewListNestedObjectValueOfSlice(ctx, []*TestFlexBlockKeyMapTF02{ + TestName: "map block key ptr both", + Source: &TestFlexMapBlockKeyTF01{ + BlockMap: fwtypes.NewListNestedObjectValueOfSlice(ctx, []*TestFlexMapBlockKeyTF02{ { - TFBlockKeyMap: types.StringValue("x"), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), + MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), }, { - TFBlockKeyMap: types.StringValue("y"), - Attr1: types.StringValue("c"), - Attr2: types.StringValue("d"), + MapBlockKey: types.StringValue("y"), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), }, }), }, - Target: &TestFlexBlockKeyMapAWS03{}, - WantTarget: &TestFlexBlockKeyMapAWS03{ - BlockMap: map[string]*TestFlexBlockKeyMapAWS02{ + Target: &TestFlexMapBlockKeyAWS03{}, + WantTarget: &TestFlexMapBlockKeyAWS03{ + BlockMap: map[string]*TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", diff --git a/internal/framework/flex/auto_flatten.go b/internal/framework/flex/auto_flatten.go index 
3b0723becdf..f29ee88ccc6 100644 --- a/internal/framework/flex/auto_flatten.go +++ b/internal/framework/flex/auto_flatten.go @@ -928,7 +928,7 @@ func blockKeyMapSet(to any, key string) diag.Diagnostics { continue // Skip unexported fields. } - if field.Name != BlockKeyMap { + if field.Name != MapBlockKey { continue } @@ -940,7 +940,7 @@ func blockKeyMapSet(to any, key string) diag.Diagnostics { return diags } - diags.AddError("AutoFlEx", fmt.Sprintf("unable to find map block key (%s)", BlockKeyMap)) + diags.AddError("AutoFlEx", fmt.Sprintf("unable to find map block key (%s)", MapBlockKey)) return diags } diff --git a/internal/framework/flex/auto_flatten_test.go b/internal/framework/flex/auto_flatten_test.go index fbd6da59844..69fc1151d4a 100644 --- a/internal/framework/flex/auto_flatten_test.go +++ b/internal/framework/flex/auto_flatten_test.go @@ -750,85 +750,85 @@ func TestFlattenGeneric(t *testing.T) { }, }, { - TestName: "block key map", - Source: &TestFlexBlockKeyMapAWS01{ - BlockMap: map[string]TestFlexBlockKeyMapAWS02{ + TestName: "map block key list", + Source: &TestFlexMapBlockKeyAWS01{ + BlockMap: map[string]TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", }, }, }, - Target: &TestFlexBlockKeyMapTF01{}, - WantTarget: &TestFlexBlockKeyMapTF01{ - BlockMap: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexBlockKeyMapTF02](ctx, []TestFlexBlockKeyMapTF02{ + Target: &TestFlexMapBlockKeyTF01{}, + WantTarget: &TestFlexMapBlockKeyTF01{ + BlockMap: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexMapBlockKeyTF02](ctx, []TestFlexMapBlockKeyTF02{ { - TFBlockKeyMap: types.StringValue("x"), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), + MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), }, }), }, }, { - TestName: "block key set map", - Source: &TestFlexBlockKeyMapAWS01{ - BlockMap: map[string]TestFlexBlockKeyMapAWS02{ + TestName: "map block key set", + Source: 
&TestFlexMapBlockKeyAWS01{ + BlockMap: map[string]TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", }, }, }, - Target: &TestFlexBlockKeyMapTF03{}, - WantTarget: &TestFlexBlockKeyMapTF03{ - BlockMap: fwtypes.NewSetNestedObjectValueOfValueSlice[TestFlexBlockKeyMapTF02](ctx, []TestFlexBlockKeyMapTF02{ + Target: &TestFlexMapBlockKeyTF03{}, + WantTarget: &TestFlexMapBlockKeyTF03{ + BlockMap: fwtypes.NewSetNestedObjectValueOfValueSlice[TestFlexMapBlockKeyTF02](ctx, []TestFlexMapBlockKeyTF02{ { - TFBlockKeyMap: types.StringValue("x"), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), + MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), }, }), }, }, { - TestName: "block key map ptr source", - Source: &TestFlexBlockKeyMapAWS03{ - BlockMap: map[string]*TestFlexBlockKeyMapAWS02{ + TestName: "map block key ptr source", + Source: &TestFlexMapBlockKeyAWS03{ + BlockMap: map[string]*TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", }, }, }, - Target: &TestFlexBlockKeyMapTF01{}, - WantTarget: &TestFlexBlockKeyMapTF01{ - BlockMap: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexBlockKeyMapTF02](ctx, []TestFlexBlockKeyMapTF02{ + Target: &TestFlexMapBlockKeyTF01{}, + WantTarget: &TestFlexMapBlockKeyTF01{ + BlockMap: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexMapBlockKeyTF02](ctx, []TestFlexMapBlockKeyTF02{ { - TFBlockKeyMap: types.StringValue("x"), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), + MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), }, }), }, }, { - TestName: "block key map ptr both", - Source: &TestFlexBlockKeyMapAWS03{ - BlockMap: map[string]*TestFlexBlockKeyMapAWS02{ + TestName: "map block key ptr both", + Source: &TestFlexMapBlockKeyAWS03{ + BlockMap: map[string]*TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", }, }, }, - Target: &TestFlexBlockKeyMapTF01{}, - WantTarget: 
&TestFlexBlockKeyMapTF01{ - BlockMap: fwtypes.NewListNestedObjectValueOfSlice(ctx, []*TestFlexBlockKeyMapTF02{ + Target: &TestFlexMapBlockKeyTF01{}, + WantTarget: &TestFlexMapBlockKeyTF01{ + BlockMap: fwtypes.NewListNestedObjectValueOfSlice(ctx, []*TestFlexMapBlockKeyTF02{ { - TFBlockKeyMap: types.StringValue("x"), - Attr1: types.StringValue("a"), - Attr2: types.StringValue("b"), + MapBlockKey: types.StringValue("x"), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), }, }), }, diff --git a/internal/framework/flex/autoflex.go b/internal/framework/flex/autoflex.go index fda37eb9b05..ace1d4ecf52 100644 --- a/internal/framework/flex/autoflex.go +++ b/internal/framework/flex/autoflex.go @@ -18,7 +18,7 @@ type ResourcePrefixCtxKey string const ( ResourcePrefix ResourcePrefixCtxKey = "RESOURCE_PREFIX" ResourcePrefixRecurse ResourcePrefixCtxKey = "RESOURCE_PREFIX_RECURSE" - BlockKeyMap = "TFBlockKeyMap" + MapBlockKey = "MapBlockKey" ) // Expand = TF --> AWS @@ -95,7 +95,7 @@ func autoFlexConvertStruct(ctx context.Context, from any, to any, flexer autoFle if fieldName == "Tags" { continue // Resource tags are handled separately. 
} - if fieldName == BlockKeyMap { + if fieldName == MapBlockKey { continue } diff --git a/internal/framework/flex/autoflex_test.go b/internal/framework/flex/autoflex_test.go index 6ae32a5ed8d..b30fac0c715 100644 --- a/internal/framework/flex/autoflex_test.go +++ b/internal/framework/flex/autoflex_test.go @@ -298,29 +298,29 @@ type TestFlexTF18 struct { Field6 fwtypes.MapValueOf[types.String] `tfsdk:"field6"` } -type TestFlexBlockKeyMapTF01 struct { - BlockMap fwtypes.ListNestedObjectValueOf[TestFlexBlockKeyMapTF02] `tfsdk:"block_map"` +type TestFlexMapBlockKeyTF01 struct { + BlockMap fwtypes.ListNestedObjectValueOf[TestFlexMapBlockKeyTF02] `tfsdk:"block_map"` } -type TestFlexBlockKeyMapTF02 struct { - TFBlockKeyMap types.String `tfsdk:"block_key_map"` - Attr1 types.String `tfsdk:"attr1"` - Attr2 types.String `tfsdk:"attr2"` +type TestFlexMapBlockKeyTF02 struct { + MapBlockKey types.String `tfsdk:"map_block_key"` + Attr1 types.String `tfsdk:"attr1"` + Attr2 types.String `tfsdk:"attr2"` } -type TestFlexBlockKeyMapTF03 struct { - BlockMap fwtypes.SetNestedObjectValueOf[TestFlexBlockKeyMapTF02] `tfsdk:"block_map"` +type TestFlexMapBlockKeyTF03 struct { + BlockMap fwtypes.SetNestedObjectValueOf[TestFlexMapBlockKeyTF02] `tfsdk:"block_map"` } -type TestFlexBlockKeyMapAWS01 struct { - BlockMap map[string]TestFlexBlockKeyMapAWS02 +type TestFlexMapBlockKeyAWS01 struct { + BlockMap map[string]TestFlexMapBlockKeyAWS02 } -type TestFlexBlockKeyMapAWS02 struct { +type TestFlexMapBlockKeyAWS02 struct { Attr1 string Attr2 string } -type TestFlexBlockKeyMapAWS03 struct { - BlockMap map[string]*TestFlexBlockKeyMapAWS02 +type TestFlexMapBlockKeyAWS03 struct { + BlockMap map[string]*TestFlexMapBlockKeyAWS02 } From cb79548414329a4d88755a7be85beb225b5ab7e3 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 15:29:43 -0500 Subject: [PATCH 265/438] r/aws_lb_target_group_attachment: Alphabetize attributes. 
--- .../service/elbv2/target_group_attachment.go | 71 +++++++++---------- 1 file changed, 34 insertions(+), 37 deletions(-) diff --git a/internal/service/elbv2/target_group_attachment.go b/internal/service/elbv2/target_group_attachment.go index 70433ac3a2b..13790803de9 100644 --- a/internal/service/elbv2/target_group_attachment.go +++ b/internal/service/elbv2/target_group_attachment.go @@ -30,29 +30,26 @@ func ResourceTargetGroupAttachment() *schema.Resource { DeleteWithoutTimeout: resourceAttachmentDelete, Schema: map[string]*schema.Schema{ + "availability_zone": { + Type: schema.TypeString, + ForceNew: true, + Optional: true, + }, "target_group_arn": { Type: schema.TypeString, ForceNew: true, Required: true, }, - "target_id": { Type: schema.TypeString, ForceNew: true, Required: true, }, - "port": { Type: schema.TypeInt, ForceNew: true, Optional: true, }, - - "availability_zone": { - Type: schema.TypeString, - ForceNew: true, - Optional: true, - }, }, } } @@ -107,35 +104,6 @@ func resourceAttachmentCreate(ctx context.Context, d *schema.ResourceData, meta return diags } -func resourceAttachmentDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) - - target := &elbv2.TargetDescription{ - Id: aws.String(d.Get("target_id").(string)), - } - - if v, ok := d.GetOk("port"); ok { - target.Port = aws.Int64(int64(v.(int))) - } - - if v, ok := d.GetOk("availability_zone"); ok { - target.AvailabilityZone = aws.String(v.(string)) - } - - params := &elbv2.DeregisterTargetsInput{ - TargetGroupArn: aws.String(d.Get("target_group_arn").(string)), - Targets: []*elbv2.TargetDescription{target}, - } - - _, err := conn.DeregisterTargetsWithContext(ctx, params) - if err != nil && !tfawserr.ErrCodeEquals(err, elbv2.ErrCodeTargetGroupNotFoundException) { - return sdkdiag.AppendErrorf(diags, "deregistering Targets: %s", err) - } - - return diags -} - // resourceAttachmentRead 
requires all of the fields in order to describe the correct // target, so there is no work to do beyond ensuring that the target and group still exist. func resourceAttachmentRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { @@ -204,3 +172,32 @@ func resourceAttachmentRead(ctx context.Context, d *schema.ResourceData, meta in return diags } + +func resourceAttachmentDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) + + target := &elbv2.TargetDescription{ + Id: aws.String(d.Get("target_id").(string)), + } + + if v, ok := d.GetOk("port"); ok { + target.Port = aws.Int64(int64(v.(int))) + } + + if v, ok := d.GetOk("availability_zone"); ok { + target.AvailabilityZone = aws.String(v.(string)) + } + + params := &elbv2.DeregisterTargetsInput{ + TargetGroupArn: aws.String(d.Get("target_group_arn").(string)), + Targets: []*elbv2.TargetDescription{target}, + } + + _, err := conn.DeregisterTargetsWithContext(ctx, params) + if err != nil && !tfawserr.ErrCodeEquals(err, elbv2.ErrCodeTargetGroupNotFoundException) { + return sdkdiag.AppendErrorf(diags, "deregistering Targets: %s", err) + } + + return diags +} From b053503658aa426acae4b125b750b57281e96fae Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 15:35:00 -0500 Subject: [PATCH 266/438] r/aws_lb_target_group_attachment: Tidy up Create. 
--- .../service/elbv2/target_group_attachment.go | 46 ++++++------------- 1 file changed, 14 insertions(+), 32 deletions(-) diff --git a/internal/service/elbv2/target_group_attachment.go b/internal/service/elbv2/target_group_attachment.go index 13790803de9..d344454236f 100644 --- a/internal/service/elbv2/target_group_attachment.go +++ b/internal/service/elbv2/target_group_attachment.go @@ -5,7 +5,6 @@ package elbv2 import ( "context" - "fmt" "log" "time" @@ -14,7 +13,6 @@ import ( "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" @@ -58,48 +56,32 @@ func resourceAttachmentCreate(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) - target := &elbv2.TargetDescription{ - Id: aws.String(d.Get("target_id").(string)), - } - - if v, ok := d.GetOk("port"); ok { - target.Port = aws.Int64(int64(v.(int))) + targetGroupARN := d.Get("target_group_arn").(string) + input := &elbv2.RegisterTargetsInput{ + TargetGroupArn: aws.String(targetGroupARN), + Targets: []*elbv2.TargetDescription{{ + Id: aws.String(d.Get("target_id").(string)), + }}, } if v, ok := d.GetOk("availability_zone"); ok { - target.AvailabilityZone = aws.String(v.(string)) + input.Targets[0].AvailabilityZone = aws.String(v.(string)) } - params := &elbv2.RegisterTargetsInput{ - TargetGroupArn: aws.String(d.Get("target_group_arn").(string)), - Targets: []*elbv2.TargetDescription{target}, + if v, ok := d.GetOk("port"); ok { + input.Targets[0].Port = aws.Int64(int64(v.(int))) } - log.Printf("[INFO] Registering Target %s with Target Group %s", d.Get("target_id").(string), - 
d.Get("target_group_arn").(string)) - - err := retry.RetryContext(ctx, 10*time.Minute, func() *retry.RetryError { - _, err := conn.RegisterTargetsWithContext(ctx, params) - - if tfawserr.ErrCodeEquals(err, "InvalidTarget") { - return retry.RetryableError(fmt.Errorf("attaching instance to LB, retrying: %s", err)) - } - - if err != nil { - return retry.NonRetryableError(err) - } + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, 10*time.Minute, func() (interface{}, error) { + return conn.RegisterTargetsWithContext(ctx, input) + }, elbv2.ErrCodeInvalidTargetException) - return nil - }) - if tfresource.TimedOut(err) { - _, err = conn.RegisterTargetsWithContext(ctx, params) - } if err != nil { - return sdkdiag.AppendErrorf(diags, "registering targets with target group: %s", err) + return sdkdiag.AppendErrorf(diags, "registering ELBv2 Target Group (%s) target: %s", targetGroupARN, err) } //lintignore:R016 // Allow legacy unstable ID usage in managed resource - d.SetId(id.PrefixedUniqueId(fmt.Sprintf("%s-", d.Get("target_group_arn")))) + d.SetId(id.PrefixedUniqueId(targetGroupARN + "-")) return diags } From 5c371181bb857e9fd8b1a656128491297031e3b6 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 15:48:54 -0500 Subject: [PATCH 267/438] Move 'internal/experimental/depgraph/stack' -> 'internal/types/stack'. 
--- .../experimental/depgraph/dependency_graph.go | 15 ++--- internal/experimental/depgraph/stack.go | 56 ------------------ internal/types/stack/stack.go | 58 +++++++++++++++++++ .../depgraph => types/stack}/stack_test.go | 32 +++++----- 4 files changed, 82 insertions(+), 79 deletions(-) delete mode 100644 internal/experimental/depgraph/stack.go create mode 100644 internal/types/stack/stack.go rename internal/{experimental/depgraph => types/stack}/stack_test.go (56%) diff --git a/internal/experimental/depgraph/dependency_graph.go b/internal/experimental/depgraph/dependency_graph.go index b4a5bfd291b..e599f122059 100644 --- a/internal/experimental/depgraph/dependency_graph.go +++ b/internal/experimental/depgraph/dependency_graph.go @@ -8,6 +8,7 @@ import ( "strings" tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" + "github.com/hashicorp/terraform-provider-aws/internal/types/stack" "golang.org/x/exp/slices" ) @@ -204,20 +205,20 @@ func depthFirstSearch(edges map[string][]string) func(s string) ([]string, error inCurrentPath := make(map[string]struct{}) currentPath := make([]string, 0) - todo := newStack() + todo := stack.New[*todoValue]() - todo.push(&todoValue{ + todo.Push(&todoValue{ node: s, }) - for todo.len() > 0 { - current := todo.peek().(*todoValue) + for todo.Len() > 0 { + current := todo.Peek() node := current.node if !current.processed { // Visit edges. if slices.Contains(visited, node) { - todo.pop() + todo.Pop() continue } @@ -232,7 +233,7 @@ func depthFirstSearch(edges map[string][]string) func(s string) ([]string, error nodeEdges := edges[node] for i := len(nodeEdges) - 1; i >= 0; i-- { - todo.push(&todoValue{ + todo.Push(&todoValue{ node: nodeEdges[i], }) } @@ -241,7 +242,7 @@ func depthFirstSearch(edges map[string][]string) func(s string) ([]string, error } else { // Edges have been visited. // Unroll the stack. 
- todo.pop() + todo.Pop() if n := len(currentPath); n > 0 { currentPath = currentPath[:n-1] } diff --git a/internal/experimental/depgraph/stack.go b/internal/experimental/depgraph/stack.go deleted file mode 100644 index 05095542460..00000000000 --- a/internal/experimental/depgraph/stack.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package depgraph - -type stack struct { - top *stackNode - length int -} - -type stackNode struct { - value interface{} - prev *stackNode -} - -// newStack returns a new, empty stack. -func newStack() *stack { - return &stack{} -} - -// len returns the stack's depth. -func (s *stack) len() int { - return s.length -} - -// peek returns the top item on the stack. -func (s *stack) peek() interface{} { - if s.length == 0 { - return nil - } - - return s.top.value -} - -// pop returns the top item on the stack and removes it from the stack. -func (s *stack) pop() interface{} { - if s.length == 0 { - return nil - } - - top := s.top - s.top = top.prev - s.length-- - - return top.value -} - -// push puts the specified item on the top of the stack. -func (s *stack) push(value interface{}) { - node := &stackNode{ - value: value, - prev: s.top, - } - s.top = node - s.length++ -} diff --git a/internal/types/stack/stack.go b/internal/types/stack/stack.go new file mode 100644 index 00000000000..72f1fdbb2e5 --- /dev/null +++ b/internal/types/stack/stack.go @@ -0,0 +1,58 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package stack + +type stack[T any] struct { + top *stackNode[T] + length int +} + +type stackNode[T any] struct { + value T + prev *stackNode[T] +} + +// New returns a new, empty stack. +func New[T any]() *stack[T] { + return &stack[T]{} +} + +// Len returns the stack's depth. +func (s *stack[T]) Len() int { + return s.length +} + +// Peek returns the top item on the stack. 
+func (s *stack[T]) Peek() T { + if s.length == 0 { + var zero T + return zero + } + + return s.top.value +} + +// Pop returns the top item on the stack and removes it from the stack. +func (s *stack[T]) Pop() T { + if s.length == 0 { + var zero T + return zero + } + + top := s.top + s.top = top.prev + s.length-- + + return top.value +} + +// Push puts the specified item on the top of the stack. +func (s *stack[T]) Push(value T) { + node := &stackNode[T]{ + value: value, + prev: s.top, + } + s.top = node + s.length++ +} diff --git a/internal/experimental/depgraph/stack_test.go b/internal/types/stack/stack_test.go similarity index 56% rename from internal/experimental/depgraph/stack_test.go rename to internal/types/stack/stack_test.go index 7bf653e9db4..fd8c70309ad 100644 --- a/internal/experimental/depgraph/stack_test.go +++ b/internal/types/stack/stack_test.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package depgraph +package stack import ( "testing" @@ -10,54 +10,54 @@ import ( func TestStack(t *testing.T) { t.Parallel() - s := newStack() + s := New[int]() - if got, expected := s.len(), 0; got != expected { + if got, expected := s.Len(), 0; got != expected { t.Fatalf("incorrect length. Expected: %d, got: %d", expected, got) } - if got, expected := s.peek(), interface{}(nil); got != expected { + if got, expected := s.Peek(), interface{}(nil); got != expected { t.Fatalf("incorrect value. Expected: %v, got: %v", expected, got) } - if got, expected := s.pop(), interface{}(nil); got != expected { + if got, expected := s.Pop(), interface{}(nil); got != expected { t.Fatalf("incorrect value. Expected: %v, got: %v", expected, got) } - s.push(1) + s.Push(1) - if got, expected := s.len(), 1; got != expected { + if got, expected := s.Len(), 1; got != expected { t.Fatalf("incorrect length. 
Expected: %d, got: %d", expected, got) } - if got, expected := s.peek(), interface{}(1); got != expected { + if got, expected := s.Peek(), interface{}(1); got != expected { t.Fatalf("incorrect value. Expected: %v, got: %v", expected, got) } - if got, expected := s.pop(), interface{}(1); got != expected { + if got, expected := s.Pop(), interface{}(1); got != expected { t.Fatalf("incorrect value. Expected: %v, got: %v", expected, got) } - if got, expected := s.len(), 0; got != expected { + if got, expected := s.Len(), 0; got != expected { t.Fatalf("incorrect length. Expected: %d, got: %d", expected, got) } - s.push(2) - s.push(3) + s.Push(2) + s.Push(3) - if got, expected := s.len(), 2; got != expected { + if got, expected := s.Len(), 2; got != expected { t.Fatalf("incorrect length. Expected: %d, got: %d", expected, got) } - if got, expected := s.peek(), interface{}(3); got != expected { + if got, expected := s.Peek(), interface{}(3); got != expected { t.Fatalf("incorrect value. Expected: %v, got: %v", expected, got) } - if got, expected := s.pop(), interface{}(3); got != expected { + if got, expected := s.Pop(), interface{}(3); got != expected { t.Fatalf("incorrect value. Expected: %v, got: %v", expected, got) } - if got, expected := s.peek(), interface{}(2); got != expected { + if got, expected := s.Peek(), interface{}(2); got != expected { t.Fatalf("incorrect value. Expected: %v, got: %v", expected, got) } } From 106f2c19ed847f132633187a736b781b44fc2dea Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 15:49:28 -0500 Subject: [PATCH 268/438] Add 'json.RemoveEmptyFields'. 
--- internal/json/remove.go | 72 ++++++++++++++++++++++++++++++ internal/json/remove_test.go | 85 ++++++++++++++++++++++++++++++++++++ 2 files changed, 157 insertions(+) diff --git a/internal/json/remove.go b/internal/json/remove.go index 43233c5db2d..1191adf2e07 100644 --- a/internal/json/remove.go +++ b/internal/json/remove.go @@ -42,3 +42,75 @@ func RemoveFields(in string, fields ...string) string { return string(out) } + +// RemoveEmptyFields removes all empty fields from a valid JSON string. +func RemoveEmptyFields(in string) string { + n := 0 + for { + in, n = removeEmptyFields(in) + if n == 0 { + break + } + } + + return in +} + +// removeEmptyFields removes `null`, empty array (`[]`) and empty object (`{}`) fields from a valid JSON string. +// Returns the new JSON string and the number of empty fields removed. +func removeEmptyFields(in string) (string, int) { + out := make([]byte, 0, len(in)) + lenBefore := 0 + removed := 0 + + err := ujson.Walk([]byte(in), func(_ int, key, value []byte) bool { + n := len(out) + + // For valid JSON, value will never be empty. + skip := false + switch value[0] { + case 'n': // Null (null) + skip = true + case '[': // Start of array + lenBefore = n + case ']': // End of array + if out[n-1] == '[' { + // Truncate output. + out = out[:lenBefore] + lenBefore = 0 + skip = true + } + case '{': // Start of object + lenBefore = n + case '}': // End of object + if n > 1 && out[n-1] == '{' { + // Truncate output. + out = out[:lenBefore] + lenBefore = 0 + skip = true + } + } + + if skip { + removed++ + return false + } + + if n != 0 && ujson.ShouldAddComma(value, out[n-1]) { + out = append(out, ',') + } + if len(key) > 0 { + out = append(out, key...) + out = append(out, ':') + } + out = append(out, value...) 
+ + return true + }) + + if err != nil { + return "", 0 + } + + return string(out), removed +} diff --git a/internal/json/remove_test.go b/internal/json/remove_test.go index 541ee288a56..629054ed144 100644 --- a/internal/json/remove_test.go +++ b/internal/json/remove_test.go @@ -43,3 +43,88 @@ func TestRemoveFields(t *testing.T) { }) } } + +func TestRemoveEmptyFields(t *testing.T) { + t.Parallel() + + testCases := []struct { + testName string + input string + want string + }{ + // { + // testName: "empty JSON", + // input: "{}", + // want: "{}", + // }, + // { + // testName: "single non-empty simple field", + // input: `{"key": 42}`, + // want: `{"key":42}`, + // }, + // { + // testName: "single non-empty array field", + // input: `{"key": [1, true, "answer"]}`, + // want: `{"key":[1,true,"answer"]}`, + // }, + // { + // testName: "single non-empty object field", + // input: `{"key": {"inner": true}}`, + // want: `{"key":{"inner":true}}`, + // }, + // { + // testName: "single null field", + // input: `{"key": null}`, + // want: `{}`, + // }, + // { + // testName: "single empty array field", + // input: `{"key": []}`, + // want: `{}`, + // }, + // { + // testName: "single empty object field", + // input: `{"key": {}}`, + // want: `{}`, + // }, + // { + // testName: "empty fields deeply nested 1 pass", + // input: `{"key": {"a": [1, 2], "b": [], "c": {"d": true, "e": null}}}`, + // want: `{"key":{"a":[1,2],"c":{"d":true}}}`, + // }, + // { + // testName: "empty fields deeply nested 2 passes", + // input: `{"key": {"a": [1, 2], "b": {}, "c": {"d": null}}}`, + // want: `{"key":{"a":[1,2]}}`, + // }, + { + testName: "empty fields deeply nested 2 passes many empty objects", + // input: `{"key": {"a": [1, 2], "b": {}, "c": {"d": {}}, "e": {}, "f": 99}}`, + input: `{"key": {"a": [1, 2], "c": {"d": {}}, "f": 99}}`, + want: `{"key":{"a":[1,2],"f":99}}`, + }, + { + testName: "empty fields nested empty arrays", + input: `{"key": {"a": [1, [2], [], [[]], 3]}}`, + want: 
`{"key":{"a":[1,[2],3]}}`, + }, + // { + // testName: "real life example", + // input: `{"TargetMetadata":{"SupportLobs":true,"LimitedSizeLobMode":true,"LobMaxSize":32},"FullLoadSettings":{"TargetTablePrepMode":"DROP_AND_CREATE","MaxFullLoadSubTasks":8,"TransactionConsistencyTimeout":600,"CommitRate":10000},"TTSettings":{"TTS3Settings":{},"TTRecordSettings":{}},"Logging":{},"ControlTablesSettings":{"HistoryTimeslotInMinutes":5},"StreamBufferSettings":{"StreamBufferCount":3,"StreamBufferSizeInMB":8},"ChangeProcessingTuning":{"BatchApplyPreserveTransaction":true,"BatchApplyTimeoutMin":1,"BatchApplyTimeoutMax":30,"BatchApplyMemoryLimit":500,"MinTransactionSize":1000,"CommitTimeout":1,"MemoryLimitTotal":1024,"MemoryKeepTime":60,"StatementCacheSize":50},"ChangeProcessingDdlHandlingPolicy":{"HandleSourceTableDropped":true,"HandleSourceTableTruncated":true,"HandleSourceTableAltered":true},"LoopbackPreventionSettings":{},"CharacterSetSettings":{"CharacterSetSupport":{}},"BeforeImageSettings":{},"ErrorBehavior":{"DataErrorPolicy":"LOG_ERROR","DataTruncationErrorPolicy":"LOG_ERROR","DataErrorEscalationPolicy":"SUSPEND_TABLE","TableErrorPolicy":"SUSPEND_TABLE","TableErrorEscalationPolicy":"STOP_TASK","RecoverableErrorCount":-1,"RecoverableErrorInterval":5,"RecoverableErrorThrottling":true,"RecoverableErrorThrottlingMax":1800,"ApplyErrorDeletePolicy":"IGNORE_RECORD","ApplyErrorInsertPolicy":"LOG_ERROR","ApplyErrorUpdatePolicy":"LOG_ERROR","ApplyErrorEscalationPolicy":"LOG_ERROR","FullLoadIgnoreConflicts":true},"ValidationSettings":{"ValidationMode":"ROW_LEVEL","ThreadCount":5,"PartitionSize":10000,"FailureMaxCount":10000,"TableFailureMaxCount":1000}}`, + // want: 
`{"TargetMetadata":{"SupportLobs":true,"LimitedSizeLobMode":true,"LobMaxSize":32},"FullLoadSettings":{"TargetTablePrepMode":"DROP_AND_CREATE","MaxFullLoadSubTasks":8,"TransactionConsistencyTimeout":600,"CommitRate":10000},"ControlTablesSettings":{"HistoryTimeslotInMinutes":5},"StreamBufferSettings":{"StreamBufferCount":3,"StreamBufferSizeInMB":8},"ChangeProcessingTuning":{"BatchApplyPreserveTransaction":true,"BatchApplyTimeoutMin":1,"BatchApplyTimeoutMax":30,"BatchApplyMemoryLimit":500,"MinTransactionSize":1000,"CommitTimeout":1,"MemoryLimitTotal":1024,"MemoryKeepTime":60,"StatementCacheSize":50},"ChangeProcessingDdlHandlingPolicy":{"HandleSourceTableDropped":true,"HandleSourceTableTruncated":true,"HandleSourceTableAltered":true},"ErrorBehavior":{"DataErrorPolicy":"LOG_ERROR","DataTruncationErrorPolicy":"LOG_ERROR","DataErrorEscalationPolicy":"SUSPEND_TABLE","TableErrorPolicy":"SUSPEND_TABLE","TableErrorEscalationPolicy":"STOP_TASK","RecoverableErrorCount":-1,"RecoverableErrorInterval":5,"RecoverableErrorThrottling":true,"RecoverableErrorThrottlingMax":1800,"ApplyErrorDeletePolicy":"IGNORE_RECORD","ApplyErrorInsertPolicy":"LOG_ERROR","ApplyErrorUpdatePolicy":"LOG_ERROR","ApplyErrorEscalationPolicy":"LOG_ERROR","FullLoadIgnoreConflicts":true},"ValidationSettings":{"ValidationMode":"ROW_LEVEL","ThreadCount":5,"PartitionSize":10000,"FailureMaxCount":10000,"TableFailureMaxCount":1000}}`, + // }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.testName, func(t *testing.T) { + t.Parallel() + + if got, want := RemoveEmptyFields(testCase.input), testCase.want; got != want { + t.Errorf("RemoveEmptyFields(%q) = %q, want %q", testCase.input, got, want) + } + }) + } +} + +// 
{"TargetMetadata":{"SupportLobs":true,"LimitedSizeLobMode":true,"LobMaxSize":32},"FullLoadSettings":{"TargetTablePrepMode":"DROP_AND_CREATE","MaxFullLoadSubTasks":8,"TransactionConsistencyTimeout":600,"CommitRate":10000},"TTSettings":{"TTS3Settings":{},"TTRecordSettings":{}},"Logging":{},"ControlTablesSettings":{"HistoryTimeslotInMinutes":5},"StreamBufferSettings":{"StreamBufferCount":3,"StreamBufferSizeInMB":8},"ChangeProcessingTuning":{"BatchApplyPreserveTransaction":true,"BatchApplyTimeoutMin":1,"BatchApplyTimeoutMax":30,"BatchApplyMemoryLimit":500,"MinTransactionSize":1000,"CommitTimeout":1,"MemoryLimitTotal":1024,"MemoryKeepTime":60,"StatementCacheSize":50},"ChangeProcessingDdlHandlingPolicy":{"HandleSourceTableDropped":true,"HandleSourceTableTruncated":true,"HandleSourceTableAltered":true},"LoopbackPreventionSettings":{},"CharacterSetSettings":{"CharacterSetSupport":{}},"BeforeImageSettings":{},"ErrorBehavior":{"DataErrorPolicy":"LOG_ERROR","DataTruncationErrorPolicy":"LOG_ERROR","DataErrorEscalationPolicy":"SUSPEND_TABLE","TableErrorPolicy":"SUSPEND_TABLE","TableErrorEscalationPolicy":"STOP_TASK","RecoverableErrorCount":-1,"RecoverableErrorInterval":5,"RecoverableErrorThrottling":true,"RecoverableErrorThrottlingMax":1800,"ApplyErrorDeletePolicy":"IGNORE_RECORD","ApplyErrorInsertPolicy":"LOG_ERROR","ApplyErrorUpdatePolicy":"LOG_ERROR","ApplyErrorEscalationPolicy":"LOG_ERROR","FullLoadIgnoreConflicts":true},"ValidationSettings":{"ValidationMode":"ROW_LEVEL","ThreadCount":5,"PartitionSize":10000,"FailureMaxCount":10000,"TableFailureMaxCount":1000}} From 825407b6fec4a5bfa3cf29d55895ea9f9107c716 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 15:50:51 -0500 Subject: [PATCH 269/438] 'flattenSettings' -> 'flattenTaskSettings'. 
--- internal/service/dms/replication_config.go | 4 +++- internal/service/dms/task_settings_json.go | 6 +++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/internal/service/dms/replication_config.go b/internal/service/dms/replication_config.go index 73cc81bf697..6d5862550df 100644 --- a/internal/service/dms/replication_config.go +++ b/internal/service/dms/replication_config.go @@ -233,7 +233,9 @@ func resourceReplicationConfigRead(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "setting compute_config: %s", err) } d.Set("replication_config_identifier", replicationConfig.ReplicationConfigIdentifier) - d.Set("replication_settings", flattenSettings(aws.StringValue(replicationConfig.ReplicationSettings))) + v := flattenTaskSettings(aws.StringValue(replicationConfig.ReplicationSettings)) + log.Printf("[INFO] replication_settings=%v", v) + d.Set("replication_settings", v) d.Set("replication_type", replicationConfig.ReplicationType) d.Set("source_endpoint_arn", replicationConfig.SourceEndpointArn) d.Set("supplemental_settings", replicationConfig.SupplementalSettings) diff --git a/internal/service/dms/task_settings_json.go b/internal/service/dms/task_settings_json.go index e67f14943cb..67275921354 100644 --- a/internal/service/dms/task_settings_json.go +++ b/internal/service/dms/task_settings_json.go @@ -10,7 +10,7 @@ import ( // https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TaskSettings.html#CHAP_Tasks.CustomizingTasks.TaskSettings.Example // https://mholt.github.io/json-to-go/ -type TaskSettings struct { +type taskSettings struct { TargetMetadata struct { TargetSchema string `json:"TargetSchema,omitempty"` SupportLobs bool `json:"SupportLobs,omitempty"` @@ -143,8 +143,8 @@ type TaskSettings struct { } `json:"ValidationSettings,omitempty"` } -func flattenSettings(apiObject string) string { - var taskSettings TaskSettings +func flattenTaskSettings(apiObject string) string { + var taskSettings 
taskSettings if err := json.Unmarshal([]byte(apiObject), &taskSettings); err != nil { return apiObject From 4f8ddd6b51a100e90b48d5ee21819a6bb77229ab Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 15:52:12 -0500 Subject: [PATCH 270/438] Fix 'TestStack'. --- internal/types/stack/stack_test.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/internal/types/stack/stack_test.go b/internal/types/stack/stack_test.go index fd8c70309ad..6a912e02a63 100644 --- a/internal/types/stack/stack_test.go +++ b/internal/types/stack/stack_test.go @@ -16,11 +16,11 @@ func TestStack(t *testing.T) { t.Fatalf("incorrect length. Expected: %d, got: %d", expected, got) } - if got, expected := s.Peek(), interface{}(nil); got != expected { + if got, expected := s.Peek(), 0; got != expected { t.Fatalf("incorrect value. Expected: %v, got: %v", expected, got) } - if got, expected := s.Pop(), interface{}(nil); got != expected { + if got, expected := s.Pop(), 0; got != expected { t.Fatalf("incorrect value. Expected: %v, got: %v", expected, got) } @@ -30,11 +30,11 @@ func TestStack(t *testing.T) { t.Fatalf("incorrect length. Expected: %d, got: %d", expected, got) } - if got, expected := s.Peek(), interface{}(1); got != expected { + if got, expected := s.Peek(), 1; got != expected { t.Fatalf("incorrect value. Expected: %v, got: %v", expected, got) } - if got, expected := s.Pop(), interface{}(1); got != expected { + if got, expected := s.Pop(), 1; got != expected { t.Fatalf("incorrect value. Expected: %v, got: %v", expected, got) } @@ -49,15 +49,15 @@ func TestStack(t *testing.T) { t.Fatalf("incorrect length. Expected: %d, got: %d", expected, got) } - if got, expected := s.Peek(), interface{}(3); got != expected { + if got, expected := s.Peek(), 3; got != expected { t.Fatalf("incorrect value. 
Expected: %v, got: %v", expected, got) } - if got, expected := s.Pop(), interface{}(3); got != expected { + if got, expected := s.Pop(), 3; got != expected { t.Fatalf("incorrect value. Expected: %v, got: %v", expected, got) } - if got, expected := s.Peek(), interface{}(2); got != expected { + if got, expected := s.Peek(), 2; got != expected { t.Fatalf("incorrect value. Expected: %v, got: %v", expected, got) } } From 370ec894aa82bb4fbeb88ba57cf463eaf9b1856b Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 16:20:57 -0500 Subject: [PATCH 271/438] Move 'internal/option' -> 'internal/types/option'. --- internal/tags/context.go | 10 +++++----- internal/types/{ => option}/option.go | 2 +- internal/types/{ => option}/option_test.go | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) rename internal/types/{ => option}/option.go (98%) rename internal/types/{ => option}/option_test.go (99%) diff --git a/internal/tags/context.go b/internal/tags/context.go index 71663164895..33f0aebd606 100644 --- a/internal/tags/context.go +++ b/internal/tags/context.go @@ -6,7 +6,7 @@ package tags import ( "context" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" ) // InContext represents the tagging information kept in Context. @@ -14,9 +14,9 @@ type InContext struct { DefaultConfig *DefaultConfig IgnoreConfig *IgnoreConfig // TagsIn holds tags specified in configuration. Typically this field includes any default tags and excludes system tags. - TagsIn types.Option[KeyValueTags] + TagsIn option.Option[KeyValueTags] // TagsOut holds tags returned from AWS, including any ignored or system tags. - TagsOut types.Option[KeyValueTags] + TagsOut option.Option[KeyValueTags] } // NewContext returns a Context enhanced with tagging information. 
@@ -24,8 +24,8 @@ func NewContext(ctx context.Context, defaultConfig *DefaultConfig, ignoreConfig v := InContext{ DefaultConfig: defaultConfig, IgnoreConfig: ignoreConfig, - TagsIn: types.None[KeyValueTags](), - TagsOut: types.None[KeyValueTags](), + TagsIn: option.None[KeyValueTags](), + TagsOut: option.None[KeyValueTags](), } return context.WithValue(ctx, tagKey, &v) diff --git a/internal/types/option.go b/internal/types/option/option.go similarity index 98% rename from internal/types/option.go rename to internal/types/option/option.go index 77d07ec500f..78ecc4a371a 100644 --- a/internal/types/option.go +++ b/internal/types/option/option.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package types +package option type Option[T any] []T diff --git a/internal/types/option_test.go b/internal/types/option/option_test.go similarity index 99% rename from internal/types/option_test.go rename to internal/types/option/option_test.go index 7b4abb51280..c868b637736 100644 --- a/internal/types/option_test.go +++ b/internal/types/option/option_test.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -package types +package option import ( "testing" From d1e9a80c0138ace88a1e7907720cfd1ce268e8ce Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 16:28:36 -0500 Subject: [PATCH 272/438] generate/tags: 'internal/option' -> 'internal/types/option'. 
--- internal/generate/tags/main.go | 48 +++++++++---------- .../tags/templates/v1/header_body.tmpl | 4 +- .../tags/templates/v1/list_tags_body.tmpl | 2 +- .../templates/v1/service_tags_map_body.tmpl | 2 +- .../templates/v1/service_tags_slice_body.tmpl | 4 +- .../tags/templates/v2/header_body.tmpl | 4 +- .../tags/templates/v2/list_tags_body.tmpl | 2 +- .../templates/v2/service_tags_map_body.tmpl | 2 +- .../templates/v2/service_tags_slice_body.tmpl | 4 +- .../v2/service_tags_value_map_body.tmpl | 2 +- 10 files changed, 37 insertions(+), 37 deletions(-) diff --git a/internal/generate/tags/main.go b/internal/generate/tags/main.go index 108c1bbeb15..f36d2383c35 100644 --- a/internal/generate/tags/main.go +++ b/internal/generate/tags/main.go @@ -206,18 +206,18 @@ type TemplateData struct { // The following are specific to writing import paths in the `headerBody`; // to include the package, set the corresponding field's value to true - ConnsPkg bool - FmtPkg bool - HelperSchemaPkg bool - InternalTypesPkg bool - LoggingPkg bool - NamesPkg bool - SkipAWSImp bool - SkipServiceImp bool - SkipTypesImp bool - TfLogPkg bool - TfResourcePkg bool - TimePkg bool + ConnsPkg bool + FmtPkg bool + HelperSchemaPkg bool + InternalOptionPkg bool + LoggingPkg bool + NamesPkg bool + SkipAWSImp bool + SkipServiceImp bool + SkipTypesImp bool + TfLogPkg bool + TfResourcePkg bool + TimePkg bool IsDefaultListTags bool IsDefaultUpdateTags bool @@ -298,18 +298,18 @@ func main() { ProviderNameUpper: providerNameUpper, ServicePackage: servicePackage, - ConnsPkg: (*listTags && *listTagsFunc == defaultListTagsFunc) || (*updateTags && *updateTagsFunc == defaultUpdateTagsFunc), - FmtPkg: *updateTags, - HelperSchemaPkg: awsPkg == "autoscaling", - InternalTypesPkg: (*listTags && *listTagsFunc == defaultListTagsFunc) || *serviceTagsMap || *serviceTagsSlice, - LoggingPkg: *updateTags, - NamesPkg: *updateTags && !*skipNamesImp, - SkipAWSImp: *skipAWSImp, - SkipServiceImp: *skipServiceImp, - SkipTypesImp: 
*skipTypesImp, - TfLogPkg: *updateTags, - TfResourcePkg: (*getTag || *waitForPropagation), - TimePkg: *waitForPropagation, + ConnsPkg: (*listTags && *listTagsFunc == defaultListTagsFunc) || (*updateTags && *updateTagsFunc == defaultUpdateTagsFunc), + FmtPkg: *updateTags, + HelperSchemaPkg: awsPkg == "autoscaling", + InternalOptionPkg: (*listTags && *listTagsFunc == defaultListTagsFunc) || *serviceTagsMap || *serviceTagsSlice, + LoggingPkg: *updateTags, + NamesPkg: *updateTags && !*skipNamesImp, + SkipAWSImp: *skipAWSImp, + SkipServiceImp: *skipServiceImp, + SkipTypesImp: *skipTypesImp, + TfLogPkg: *updateTags, + TfResourcePkg: (*getTag || *waitForPropagation), + TimePkg: *waitForPropagation, CreateTagsFunc: createTagsFunc, GetTagFunc: *getTagFunc, diff --git a/internal/generate/tags/templates/v1/header_body.tmpl b/internal/generate/tags/templates/v1/header_body.tmpl index 6043ab408ce..72cc432cadf 100644 --- a/internal/generate/tags/templates/v1/header_body.tmpl +++ b/internal/generate/tags/templates/v1/header_body.tmpl @@ -44,8 +44,8 @@ import ( {{- if .TfResourcePkg }} "github.com/hashicorp/terraform-provider-aws/internal/tfresource" {{- end }} - {{- if .InternalTypesPkg }} - "github.com/hashicorp/terraform-provider-aws/internal/types" + {{- if .InternalOptionPkg }} + "github.com/hashicorp/terraform-provider-aws/internal/types/option" {{- end }} {{- if .NamesPkg }} "github.com/hashicorp/terraform-provider-aws/names" diff --git a/internal/generate/tags/templates/v1/list_tags_body.tmpl b/internal/generate/tags/templates/v1/list_tags_body.tmpl index 33aa392f4e8..6f4aea7f0e9 100644 --- a/internal/generate/tags/templates/v1/list_tags_body.tmpl +++ b/internal/generate/tags/templates/v1/list_tags_body.tmpl @@ -80,7 +80,7 @@ func (p *servicePackage) {{ .ListTagsFunc | Title }}(ctx context.Context, meta a } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil diff --git 
a/internal/generate/tags/templates/v1/service_tags_map_body.tmpl b/internal/generate/tags/templates/v1/service_tags_map_body.tmpl index 8f72a4d3f09..c640fb00507 100644 --- a/internal/generate/tags/templates/v1/service_tags_map_body.tmpl +++ b/internal/generate/tags/templates/v1/service_tags_map_body.tmpl @@ -25,7 +25,7 @@ func {{ .GetTagsInFunc }}(ctx context.Context) map[string]*string { // {{ .SetTagsOutFunc }} sets {{ .ServicePackage }} service tags in Context. func {{ .SetTagsOutFunc }}(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some({{ .KeyValueTagsFunc }}(ctx, tags)) + inContext.TagsOut = option.Some({{ .KeyValueTagsFunc }}(ctx, tags)) } } diff --git a/internal/generate/tags/templates/v1/service_tags_slice_body.tmpl b/internal/generate/tags/templates/v1/service_tags_slice_body.tmpl index 4f46528a0f3..9233c530eb7 100644 --- a/internal/generate/tags/templates/v1/service_tags_slice_body.tmpl +++ b/internal/generate/tags/templates/v1/service_tags_slice_body.tmpl @@ -241,13 +241,13 @@ func {{ .GetTagsInFunc }}(ctx context.Context) []*{{ .TagPackage }}.{{ .TagType {{- if or ( .TagType2 ) ( .TagTypeAddBoolElem ) }} func {{ .SetTagsOutFunc }}(ctx context.Context, tags any{{ if .TagTypeIDElem }}, identifier{{ if .TagResTypeElem }}, resourceType{{ end }} string{{ end }}) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some({{ .KeyValueTagsFunc }}(ctx, tags{{ if .TagTypeIDElem }}, identifier{{ if .TagResTypeElem }}, resourceType{{ end }}{{ end }})) + inContext.TagsOut = option.Some({{ .KeyValueTagsFunc }}(ctx, tags{{ if .TagTypeIDElem }}, identifier{{ if .TagResTypeElem }}, resourceType{{ end }}{{ end }})) } } {{- else }} func {{ .SetTagsOutFunc }}(ctx context.Context, tags []*{{ .TagPackage }}.{{ .TagType }}) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some({{ .KeyValueTagsFunc }}(ctx, tags)) + inContext.TagsOut = 
option.Some({{ .KeyValueTagsFunc }}(ctx, tags)) } } {{- end }} diff --git a/internal/generate/tags/templates/v2/header_body.tmpl b/internal/generate/tags/templates/v2/header_body.tmpl index da201d8a767..fadead2fce5 100644 --- a/internal/generate/tags/templates/v2/header_body.tmpl +++ b/internal/generate/tags/templates/v2/header_body.tmpl @@ -41,8 +41,8 @@ import ( {{- if .TfResourcePkg }} "github.com/hashicorp/terraform-provider-aws/internal/tfresource" {{- end }} - {{- if .InternalTypesPkg }} - "github.com/hashicorp/terraform-provider-aws/internal/types" + {{- if .InternalOptionPkg }} + "github.com/hashicorp/terraform-provider-aws/internal/types/option" {{- end }} {{- if .NamesPkg }} "github.com/hashicorp/terraform-provider-aws/names" diff --git a/internal/generate/tags/templates/v2/list_tags_body.tmpl b/internal/generate/tags/templates/v2/list_tags_body.tmpl index ef8f6f54128..cc690346f2d 100644 --- a/internal/generate/tags/templates/v2/list_tags_body.tmpl +++ b/internal/generate/tags/templates/v2/list_tags_body.tmpl @@ -58,7 +58,7 @@ func (p *servicePackage) {{ .ListTagsFunc | Title }}(ctx context.Context, meta a } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil diff --git a/internal/generate/tags/templates/v2/service_tags_map_body.tmpl b/internal/generate/tags/templates/v2/service_tags_map_body.tmpl index 8f72a4d3f09..c640fb00507 100644 --- a/internal/generate/tags/templates/v2/service_tags_map_body.tmpl +++ b/internal/generate/tags/templates/v2/service_tags_map_body.tmpl @@ -25,7 +25,7 @@ func {{ .GetTagsInFunc }}(ctx context.Context) map[string]*string { // {{ .SetTagsOutFunc }} sets {{ .ServicePackage }} service tags in Context. 
func {{ .SetTagsOutFunc }}(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some({{ .KeyValueTagsFunc }}(ctx, tags)) + inContext.TagsOut = option.Some({{ .KeyValueTagsFunc }}(ctx, tags)) } } diff --git a/internal/generate/tags/templates/v2/service_tags_slice_body.tmpl b/internal/generate/tags/templates/v2/service_tags_slice_body.tmpl index c5aa8ebd415..d18ecd6d42b 100644 --- a/internal/generate/tags/templates/v2/service_tags_slice_body.tmpl +++ b/internal/generate/tags/templates/v2/service_tags_slice_body.tmpl @@ -241,13 +241,13 @@ func {{ .GetTagsInFunc }}(ctx context.Context) []awstypes.{{ .TagType }} { {{- if or ( .TagType2 ) ( .TagTypeAddBoolElem ) }} func {{ .SetTagsOutFunc }}(ctx context.Context, tags any{{ if .TagTypeIDElem }}, identifier{{ if .TagResTypeElem }}, resourceType{{ end }} string{{ end }}) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some({{ .KeyValueTagsFunc }}(ctx, tags{{ if .TagTypeIDElem }}, identifier{{ if .TagResTypeElem }}, resourceType{{ end }}{{ end }})) + inContext.TagsOut = option.Some({{ .KeyValueTagsFunc }}(ctx, tags{{ if .TagTypeIDElem }}, identifier{{ if .TagResTypeElem }}, resourceType{{ end }}{{ end }})) } } {{- else }} func {{ .SetTagsOutFunc }}(ctx context.Context, tags []awstypes.{{ .TagType }}) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some({{ .KeyValueTagsFunc }}(ctx, tags)) + inContext.TagsOut = option.Some({{ .KeyValueTagsFunc }}(ctx, tags)) } } {{- end }} diff --git a/internal/generate/tags/templates/v2/service_tags_value_map_body.tmpl b/internal/generate/tags/templates/v2/service_tags_value_map_body.tmpl index b3918861970..289196b29e9 100644 --- a/internal/generate/tags/templates/v2/service_tags_value_map_body.tmpl +++ b/internal/generate/tags/templates/v2/service_tags_value_map_body.tmpl @@ -25,7 +25,7 @@ func {{ .GetTagsInFunc }}(ctx context.Context) map[string]string { 
// {{ .SetTagsOutFunc }} sets {{ .ServicePackage }} service tags in Context. func {{ .SetTagsOutFunc }}(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some({{ .KeyValueTagsFunc }}(ctx, tags)) + inContext.TagsOut = option.Some({{ .KeyValueTagsFunc }}(ctx, tags)) } } From b80ce96b41d5f3076e189d0bf276498bfd29b4cc Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 16:32:58 -0500 Subject: [PATCH 273/438] r/aws_lb_target_group_attachment: Tidy up Delete. --- .../service/elbv2/target_group_attachment.go | 29 +++++++++++-------- 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/internal/service/elbv2/target_group_attachment.go b/internal/service/elbv2/target_group_attachment.go index d344454236f..429a6725277 100644 --- a/internal/service/elbv2/target_group_attachment.go +++ b/internal/service/elbv2/target_group_attachment.go @@ -159,26 +159,31 @@ func resourceAttachmentDelete(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) - target := &elbv2.TargetDescription{ - Id: aws.String(d.Get("target_id").(string)), + targetGroupARN := d.Get("target_group_arn").(string) + input := &elbv2.DeregisterTargetsInput{ + TargetGroupArn: aws.String(targetGroupARN), + Targets: []*elbv2.TargetDescription{{ + Id: aws.String(d.Get("target_id").(string)), + }}, } - if v, ok := d.GetOk("port"); ok { - target.Port = aws.Int64(int64(v.(int))) + if v, ok := d.GetOk("availability_zone"); ok { + input.Targets[0].AvailabilityZone = aws.String(v.(string)) } - if v, ok := d.GetOk("availability_zone"); ok { - target.AvailabilityZone = aws.String(v.(string)) + if v, ok := d.GetOk("port"); ok { + input.Targets[0].Port = aws.Int64(int64(v.(int))) } - params := &elbv2.DeregisterTargetsInput{ - TargetGroupArn: aws.String(d.Get("target_group_arn").(string)), - Targets: []*elbv2.TargetDescription{target}, + log.Printf("[DEBUG] Deleting 
ELBv2 Target Group Attachment: %s", d.Id()) + _, err := conn.DeregisterTargetsWithContext(ctx, input) + + if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeTargetGroupNotFoundException) { + return diags } - _, err := conn.DeregisterTargetsWithContext(ctx, params) - if err != nil && !tfawserr.ErrCodeEquals(err, elbv2.ErrCodeTargetGroupNotFoundException) { - return sdkdiag.AppendErrorf(diags, "deregistering Targets: %s", err) + if err != nil { + return sdkdiag.AppendErrorf(diags, "deregistering ELBv2 Target Group (%s) target: %s", targetGroupARN, err) } return diags From bb6806cc5913f2ec68e4f82674aae87d4efe499d Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 16:40:43 -0500 Subject: [PATCH 274/438] Run 'make gen'. --- internal/service/accessanalyzer/tags_gen.go | 6 +++--- internal/service/acm/tags_gen.go | 6 +++--- internal/service/acmpca/tags_gen.go | 6 +++--- internal/service/amp/tags_gen.go | 6 +++--- internal/service/amplify/tags_gen.go | 6 +++--- internal/service/apigateway/tags_gen.go | 4 ++-- internal/service/apigatewayv2/tags_gen.go | 6 +++--- internal/service/appautoscaling/tags_gen.go | 6 +++--- internal/service/appconfig/tags_gen.go | 6 +++--- internal/service/appflow/tags_gen.go | 6 +++--- internal/service/appintegrations/tags_gen.go | 4 ++-- internal/service/applicationinsights/tags_gen.go | 6 +++--- internal/service/appmesh/tags_gen.go | 6 +++--- internal/service/apprunner/tags_gen.go | 6 +++--- internal/service/appstream/tags_gen.go | 6 +++--- internal/service/appsync/tags_gen.go | 6 +++--- internal/service/athena/tags_gen.go | 6 +++--- internal/service/auditmanager/tags_gen.go | 6 +++--- internal/service/autoscaling/tags_gen.go | 6 +++--- internal/service/backup/tags_gen.go | 6 +++--- internal/service/batch/tags_gen.go | 6 +++--- internal/service/ce/tags_gen.go | 6 +++--- internal/service/chime/tags_gen.go | 6 +++--- internal/service/chimesdkmediapipelines/tags_gen.go | 6 +++--- internal/service/chimesdkvoice/tags_gen.go | 6 +++--- 
internal/service/cleanrooms/tags_gen.go | 6 +++--- internal/service/cloud9/tags_gen.go | 6 +++--- internal/service/cloudformation/tags_gen.go | 4 ++-- internal/service/cloudfront/tags_gen.go | 6 +++--- internal/service/cloudhsmv2/tags_gen.go | 6 +++--- internal/service/cloudtrail/tags_gen.go | 6 +++--- internal/service/cloudwatch/tags_gen.go | 6 +++--- internal/service/codeartifact/tags_gen.go | 6 +++--- internal/service/codebuild/tags_gen.go | 4 ++-- internal/service/codecommit/tags_gen.go | 6 +++--- internal/service/codeguruprofiler/tags_gen.go | 6 +++--- internal/service/codegurureviewer/tags_gen.go | 6 +++--- internal/service/codepipeline/tags_gen.go | 6 +++--- internal/service/codestarconnections/tags_gen.go | 6 +++--- internal/service/codestarnotifications/tags_gen.go | 6 +++--- internal/service/cognitoidentity/tags_gen.go | 6 +++--- internal/service/cognitoidp/tags_gen.go | 6 +++--- internal/service/comprehend/tags_gen.go | 6 +++--- internal/service/configservice/tags_gen.go | 6 +++--- internal/service/connect/tags_gen.go | 4 ++-- internal/service/customerprofiles/tags_gen.go | 6 +++--- internal/service/dataexchange/tags_gen.go | 6 +++--- internal/service/datapipeline/tags_gen.go | 4 ++-- internal/service/datasync/tags_gen.go | 6 +++--- internal/service/dax/tags_gen.go | 6 +++--- internal/service/deploy/tags_gen.go | 6 +++--- internal/service/detective/tags_gen.go | 6 +++--- internal/service/devicefarm/tags_gen.go | 6 +++--- internal/service/directconnect/tags_gen.go | 6 +++--- internal/service/dlm/tags_gen.go | 6 +++--- internal/service/dms/tags_gen.go | 6 +++--- internal/service/docdb/tags_gen.go | 6 +++--- internal/service/docdbelastic/tags_gen.go | 6 +++--- internal/service/ds/tags_gen.go | 6 +++--- internal/service/dynamodb/tags_gen.go | 6 +++--- internal/service/ec2/tags_gen.go | 6 +++--- internal/service/ec2/tagsv2_gen.go | 4 ++-- internal/service/ecr/tags_gen.go | 6 +++--- internal/service/ecrpublic/tags_gen.go | 6 +++--- 
internal/service/ecs/tags_gen.go | 6 +++--- internal/service/efs/tags_gen.go | 6 +++--- internal/service/eks/tags_gen.go | 6 +++--- internal/service/elasticache/tags_gen.go | 6 +++--- internal/service/elasticbeanstalk/tags_gen.go | 6 +++--- internal/service/elasticsearch/tags_gen.go | 6 +++--- internal/service/elb/tags_gen.go | 6 +++--- internal/service/elbv2/tags_gen.go | 6 +++--- internal/service/emr/tags_gen.go | 4 ++-- internal/service/emrcontainers/tags_gen.go | 6 +++--- internal/service/emrserverless/tags_gen.go | 6 +++--- internal/service/events/tags_gen.go | 6 +++--- internal/service/evidently/tags_gen.go | 4 ++-- internal/service/finspace/tags_gen.go | 6 +++--- internal/service/firehose/tags_gen.go | 6 +++--- internal/service/fis/tags_gen.go | 6 +++--- internal/service/fms/tags_gen.go | 6 +++--- internal/service/fsx/tags_gen.go | 6 +++--- internal/service/gamelift/tags_gen.go | 6 +++--- internal/service/glacier/tags_gen.go | 6 +++--- internal/service/globalaccelerator/tags_gen.go | 6 +++--- internal/service/glue/tags_gen.go | 6 +++--- internal/service/grafana/tags_gen.go | 6 +++--- internal/service/greengrass/tags_gen.go | 6 +++--- internal/service/guardduty/tags_gen.go | 6 +++--- internal/service/healthlake/tags_gen.go | 6 +++--- internal/service/iam/tags_gen.go | 4 ++-- internal/service/imagebuilder/tags_gen.go | 6 +++--- internal/service/inspector/tags_gen.go | 6 +++--- internal/service/internetmonitor/tags_gen.go | 6 +++--- internal/service/iot/tags_gen.go | 6 +++--- internal/service/iotanalytics/tags_gen.go | 6 +++--- internal/service/iotevents/tags_gen.go | 6 +++--- internal/service/ivs/tags_gen.go | 6 +++--- internal/service/ivschat/tags_gen.go | 6 +++--- internal/service/kafka/tags_gen.go | 4 ++-- internal/service/kafka/tagsv2_gen.go | 4 ++-- internal/service/kendra/tags_gen.go | 6 +++--- internal/service/keyspaces/tags_gen.go | 6 +++--- internal/service/kinesis/tags_gen.go | 6 +++--- internal/service/kinesisanalytics/tags_gen.go | 6 +++--- 
internal/service/kinesisanalyticsv2/tags_gen.go | 6 +++--- internal/service/kinesisvideo/tags_gen.go | 6 +++--- internal/service/kms/tags_gen.go | 6 +++--- internal/service/lambda/tags_gen.go | 6 +++--- internal/service/lexv2models/tags_gen.go | 6 +++--- internal/service/licensemanager/tags_gen.go | 4 ++-- internal/service/lightsail/tags_gen.go | 4 ++-- internal/service/location/tags_gen.go | 6 +++--- internal/service/logs/tags_gen.go | 6 +++--- internal/service/lookoutmetrics/tags_gen.go | 6 +++--- internal/service/macie2/tags_gen.go | 4 ++-- internal/service/mediaconnect/tags_gen.go | 6 +++--- internal/service/mediaconvert/tags_gen.go | 6 +++--- internal/service/medialive/tags_gen.go | 6 +++--- internal/service/mediapackage/tags_gen.go | 6 +++--- internal/service/mediastore/tags_gen.go | 6 +++--- internal/service/memorydb/tags_gen.go | 6 +++--- internal/service/mq/tags_gen.go | 6 +++--- internal/service/mwaa/tags_gen.go | 4 ++-- internal/service/neptune/tags_gen.go | 6 +++--- internal/service/networkfirewall/tags_gen.go | 6 +++--- internal/service/networkmanager/tags_gen.go | 4 ++-- internal/service/oam/tags_gen.go | 6 +++--- internal/service/opensearch/tags_gen.go | 6 +++--- internal/service/opensearchserverless/tags_gen.go | 6 +++--- internal/service/opsworks/tags_gen.go | 6 +++--- internal/service/organizations/tags_gen.go | 6 +++--- internal/service/outposts/tags_gen.go | 4 ++-- internal/service/pinpoint/tags_gen.go | 6 +++--- internal/service/pipes/tags_gen.go | 6 +++--- internal/service/qldb/tags_gen.go | 6 +++--- internal/service/quicksight/tags_gen.go | 6 +++--- internal/service/ram/tags_gen.go | 4 ++-- internal/service/rbin/tags_gen.go | 6 +++--- internal/service/rds/tags_gen.go | 6 +++--- internal/service/redshift/tags_gen.go | 4 ++-- internal/service/redshiftserverless/tags_gen.go | 6 +++--- internal/service/resourceexplorer2/tags_gen.go | 6 +++--- internal/service/resourcegroups/tags_gen.go | 6 +++--- 
internal/service/resourcegroupstaggingapi/tags_gen.go | 4 ++-- internal/service/rolesanywhere/tags_gen.go | 6 +++--- internal/service/route53/tags_gen.go | 6 +++--- internal/service/route53domains/tags_gen.go | 6 +++--- internal/service/route53recoveryreadiness/tags_gen.go | 6 +++--- internal/service/route53resolver/tags_gen.go | 6 +++--- internal/service/rum/tags_gen.go | 4 ++-- internal/service/s3/tags_gen.go | 4 ++-- internal/service/s3/tagsv2_gen.go | 4 ++-- internal/service/s3control/tags_gen.go | 6 +++--- internal/service/s3control/tagss3_gen.go | 4 ++-- internal/service/sagemaker/tags_gen.go | 6 +++--- internal/service/scheduler/tags_gen.go | 6 +++--- internal/service/schemas/tags_gen.go | 6 +++--- internal/service/secretsmanager/tags_gen.go | 4 ++-- internal/service/securityhub/tags_gen.go | 6 +++--- internal/service/securitylake/tags_gen.go | 6 +++--- internal/service/serverlessrepo/tags_gen.go | 4 ++-- internal/service/servicecatalog/tags_gen.go | 4 ++-- internal/service/servicediscovery/tags_gen.go | 6 +++--- internal/service/sesv2/tags_gen.go | 6 +++--- internal/service/sfn/tags_gen.go | 6 +++--- internal/service/shield/tags_gen.go | 6 +++--- internal/service/signer/tags_gen.go | 6 +++--- internal/service/sns/tags_gen.go | 6 +++--- internal/service/sqs/tags_gen.go | 6 +++--- internal/service/ssm/tags_gen.go | 6 +++--- internal/service/ssmcontacts/tags_gen.go | 6 +++--- internal/service/ssmincidents/tags_gen.go | 6 +++--- internal/service/ssoadmin/tags_gen.go | 6 +++--- internal/service/storagegateway/tags_gen.go | 6 +++--- internal/service/swf/tags_gen.go | 6 +++--- internal/service/synthetics/tags_gen.go | 4 ++-- internal/service/timestreamwrite/tags_gen.go | 6 +++--- internal/service/transcribe/tags_gen.go | 6 +++--- internal/service/transfer/tags_gen.go | 6 +++--- internal/service/vpclattice/tags_gen.go | 6 +++--- internal/service/waf/tags_gen.go | 6 +++--- internal/service/wafregional/tags_gen.go | 6 +++--- internal/service/wafv2/tags_gen.go | 6 
+++--- internal/service/worklink/tags_gen.go | 6 +++--- internal/service/workspaces/tags_gen.go | 6 +++--- internal/service/xray/tags_gen.go | 6 +++--- 187 files changed, 532 insertions(+), 532 deletions(-) diff --git a/internal/service/accessanalyzer/tags_gen.go b/internal/service/accessanalyzer/tags_gen.go index 950502764be..85f7cf44e13 100644 --- a/internal/service/accessanalyzer/tags_gen.go +++ b/internal/service/accessanalyzer/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets accessanalyzer service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/acm/tags_gen.go b/internal/service/acm/tags_gen.go index d609890cfcf..295e997bf37 100644 --- a/internal/service/acm/tags_gen.go +++ b/internal/service/acm/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets acm service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/acmpca/tags_gen.go b/internal/service/acmpca/tags_gen.go index e20ae03a3b7..df1097ebdf4 100644 --- a/internal/service/acmpca/tags_gen.go +++ b/internal/service/acmpca/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*acmpca.Tag { // setTagsOut sets acmpca service tags in Context. 
func setTagsOut(ctx context.Context, tags []*acmpca.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/amp/tags_gen.go b/internal/service/amp/tags_gen.go index 39e1f3ed8bc..f572295e5c0 100644 --- a/internal/service/amp/tags_gen.go +++ b/internal/service/amp/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets amp service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/amplify/tags_gen.go b/internal/service/amplify/tags_gen.go index fb424290ccf..c0675d696d9 100644 --- a/internal/service/amplify/tags_gen.go +++ b/internal/service/amplify/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets amplify service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/apigateway/tags_gen.go b/internal/service/apigateway/tags_gen.go index 8daa9115caf..43069e08601 100644 --- a/internal/service/apigateway/tags_gen.go +++ b/internal/service/apigateway/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets apigateway service tags in Context. func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/apigatewayv2/tags_gen.go b/internal/service/apigatewayv2/tags_gen.go index cd194405c9f..c00afe4ba85 100644 --- a/internal/service/apigatewayv2/tags_gen.go +++ b/internal/service/apigatewayv2/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := 
tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets apigatewayv2 service tags in Context. func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/appautoscaling/tags_gen.go b/internal/service/appautoscaling/tags_gen.go index 513af866fac..ed6488d7e0e 100644 --- a/internal/service/appautoscaling/tags_gen.go +++ b/internal/service/appautoscaling/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets appautoscaling service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/appconfig/tags_gen.go b/internal/service/appconfig/tags_gen.go index a8c9cbea4e5..4e791b86cc1 100644 --- a/internal/service/appconfig/tags_gen.go +++ b/internal/service/appconfig/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets appconfig service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/appflow/tags_gen.go b/internal/service/appflow/tags_gen.go index 1bf5d21ea1d..8dea46351b7 100644 --- a/internal/service/appflow/tags_gen.go +++ b/internal/service/appflow/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets appflow service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/appintegrations/tags_gen.go b/internal/service/appintegrations/tags_gen.go index 52ca9a09c95..cab3092d60b 100644 --- a/internal/service/appintegrations/tags_gen.go +++ b/internal/service/appintegrations/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets appintegrations service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/applicationinsights/tags_gen.go b/internal/service/applicationinsights/tags_gen.go index 93e283c0d1a..91e48d73fda 100644 --- a/internal/service/applicationinsights/tags_gen.go +++ b/internal/service/applicationinsights/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*applicationinsights.Tag { // setTagsOut sets applicationinsights service tags in Context. 
func setTagsOut(ctx context.Context, tags []*applicationinsights.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/appmesh/tags_gen.go b/internal/service/appmesh/tags_gen.go index 4f5d9cf6017..d2fd957c1ed 100644 --- a/internal/service/appmesh/tags_gen.go +++ b/internal/service/appmesh/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*appmesh.TagRef { // setTagsOut sets appmesh service tags in Context. 
func setTagsOut(ctx context.Context, tags []*appmesh.TagRef) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/apprunner/tags_gen.go b/internal/service/apprunner/tags_gen.go index 0c99e1ec696..8dd75c79144 100644 --- a/internal/service/apprunner/tags_gen.go +++ b/internal/service/apprunner/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets apprunner service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/appstream/tags_gen.go b/internal/service/appstream/tags_gen.go index 5d63387e8b0..884191b839f 100644 --- a/internal/service/appstream/tags_gen.go +++ b/internal/service/appstream/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets appstream service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/appsync/tags_gen.go b/internal/service/appsync/tags_gen.go index 1074f18a837..bd6eb207346 100644 --- a/internal/service/appsync/tags_gen.go +++ b/internal/service/appsync/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets appsync service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/athena/tags_gen.go b/internal/service/athena/tags_gen.go index 00e4ab74c7a..67efdee0284 100644 --- a/internal/service/athena/tags_gen.go +++ b/internal/service/athena/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets athena service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/auditmanager/tags_gen.go b/internal/service/auditmanager/tags_gen.go index cb2e7073c02..a535b88709b 100644 --- a/internal/service/auditmanager/tags_gen.go +++ b/internal/service/auditmanager/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets auditmanager service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/autoscaling/tags_gen.go b/internal/service/autoscaling/tags_gen.go index c9c57ba280f..a3242be0421 100644 --- a/internal/service/autoscaling/tags_gen.go +++ b/internal/service/autoscaling/tags_gen.go @@ -14,7 +14,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -84,7 +84,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier, res } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -236,7 +236,7 @@ func getTagsIn(ctx context.Context) []*autoscaling.Tag { // setTagsOut sets autoscaling service tags in Context. 
func setTagsOut(ctx context.Context, tags any, identifier, resourceType string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags, identifier, resourceType)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags, identifier, resourceType)) } } diff --git a/internal/service/backup/tags_gen.go b/internal/service/backup/tags_gen.go index 3e37fe6cea3..9b1d6b0913c 100644 --- a/internal/service/backup/tags_gen.go +++ b/internal/service/backup/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets backup service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/batch/tags_gen.go b/internal/service/batch/tags_gen.go index e4f9233d685..9e7e6bea062 100644 --- a/internal/service/batch/tags_gen.go +++ b/internal/service/batch/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets batch service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/ce/tags_gen.go b/internal/service/ce/tags_gen.go index cc23c7b111a..726ac69dc83 100644 --- a/internal/service/ce/tags_gen.go +++ b/internal/service/ce/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*costexplorer.ResourceTag { // setTagsOut sets ce service tags in Context. 
func setTagsOut(ctx context.Context, tags []*costexplorer.ResourceTag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/chime/tags_gen.go b/internal/service/chime/tags_gen.go index a5c227167cc..6e284296428 100644 --- a/internal/service/chime/tags_gen.go +++ b/internal/service/chime/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets chime service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/chimesdkmediapipelines/tags_gen.go b/internal/service/chimesdkmediapipelines/tags_gen.go index e0c850b8037..ebf1ccba859 100644 --- a/internal/service/chimesdkmediapipelines/tags_gen.go +++ b/internal/service/chimesdkmediapipelines/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets chimesdkmediapipelines service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/chimesdkvoice/tags_gen.go b/internal/service/chimesdkvoice/tags_gen.go index b3fee7aff27..3e69b1023c2 100644 --- a/internal/service/chimesdkvoice/tags_gen.go +++ b/internal/service/chimesdkvoice/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets chimesdkvoice service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/cleanrooms/tags_gen.go b/internal/service/cleanrooms/tags_gen.go index 46560e586b4..25cab827ad3 100644 --- a/internal/service/cleanrooms/tags_gen.go +++ b/internal/service/cleanrooms/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets cleanrooms service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/cloud9/tags_gen.go b/internal/service/cloud9/tags_gen.go index e421d534c47..d344302b785 100644 --- a/internal/service/cloud9/tags_gen.go +++ b/internal/service/cloud9/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*cloud9.Tag { // setTagsOut sets cloud9 service tags in Context. 
func setTagsOut(ctx context.Context, tags []*cloud9.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/cloudformation/tags_gen.go b/internal/service/cloudformation/tags_gen.go index 29ab991c91b..a6b0a18423e 100644 --- a/internal/service/cloudformation/tags_gen.go +++ b/internal/service/cloudformation/tags_gen.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/cloudformation" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" ) // []*SERVICE.Tag handling @@ -54,6 +54,6 @@ func getTagsIn(ctx context.Context) []*cloudformation.Tag { // setTagsOut sets cloudformation service tags in Context. func setTagsOut(ctx context.Context, tags []*cloudformation.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/cloudfront/tags_gen.go b/internal/service/cloudfront/tags_gen.go index 555cc86a719..38bb44097c1 100644 --- a/internal/service/cloudfront/tags_gen.go +++ b/internal/service/cloudfront/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = 
types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*cloudfront.Tag { // setTagsOut sets cloudfront service tags in Context. func setTagsOut(ctx context.Context, tags []*cloudfront.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/cloudhsmv2/tags_gen.go b/internal/service/cloudhsmv2/tags_gen.go index f61f9fff5c8..6faa2abc71d 100644 --- a/internal/service/cloudhsmv2/tags_gen.go +++ b/internal/service/cloudhsmv2/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*cloudhsmv2.Tag { // setTagsOut sets cloudhsmv2 service tags in Context. 
func setTagsOut(ctx context.Context, tags []*cloudhsmv2.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/cloudtrail/tags_gen.go b/internal/service/cloudtrail/tags_gen.go index 3ff41754a98..405fe71aecd 100644 --- a/internal/service/cloudtrail/tags_gen.go +++ b/internal/service/cloudtrail/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*cloudtrail.Tag { // setTagsOut sets cloudtrail service tags in Context. 
func setTagsOut(ctx context.Context, tags []*cloudtrail.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/cloudwatch/tags_gen.go b/internal/service/cloudwatch/tags_gen.go index e54959abddf..b99205cb66a 100644 --- a/internal/service/cloudwatch/tags_gen.go +++ b/internal/service/cloudwatch/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*cloudwatch.Tag { // setTagsOut sets cloudwatch service tags in Context. 
func setTagsOut(ctx context.Context, tags []*cloudwatch.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/codeartifact/tags_gen.go b/internal/service/codeartifact/tags_gen.go index 5d8fde828d2..e5f97fecb93 100644 --- a/internal/service/codeartifact/tags_gen.go +++ b/internal/service/codeartifact/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*codeartifact.Tag { // setTagsOut sets codeartifact service tags in Context. 
func setTagsOut(ctx context.Context, tags []*codeartifact.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/codebuild/tags_gen.go b/internal/service/codebuild/tags_gen.go index f96f2056d22..4047d797bca 100644 --- a/internal/service/codebuild/tags_gen.go +++ b/internal/service/codebuild/tags_gen.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/codebuild" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" ) // []*SERVICE.Tag handling @@ -54,6 +54,6 @@ func getTagsIn(ctx context.Context) []*codebuild.Tag { // setTagsOut sets codebuild service tags in Context. func setTagsOut(ctx context.Context, tags []*codebuild.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/codecommit/tags_gen.go b/internal/service/codecommit/tags_gen.go index 27c1ed988c1..09c1bfeb0a7 100644 --- a/internal/service/codecommit/tags_gen.go +++ b/internal/service/codecommit/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = 
option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets codecommit service tags in Context. func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/codeguruprofiler/tags_gen.go b/internal/service/codeguruprofiler/tags_gen.go index e16091f7185..853cf09d859 100644 --- a/internal/service/codeguruprofiler/tags_gen.go +++ b/internal/service/codeguruprofiler/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets codeguruprofiler service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/codegurureviewer/tags_gen.go b/internal/service/codegurureviewer/tags_gen.go index 40ae57a3592..9145195936b 100644 --- a/internal/service/codegurureviewer/tags_gen.go +++ b/internal/service/codegurureviewer/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets codegurureviewer service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/codepipeline/tags_gen.go b/internal/service/codepipeline/tags_gen.go index 2c632012c2f..fc322870c26 100644 --- a/internal/service/codepipeline/tags_gen.go +++ b/internal/service/codepipeline/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*codepipeline.Tag { // setTagsOut sets codepipeline service tags in Context. 
func setTagsOut(ctx context.Context, tags []*codepipeline.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/codestarconnections/tags_gen.go b/internal/service/codestarconnections/tags_gen.go index 02b6d1d2cc9..273ed514fa8 100644 --- a/internal/service/codestarconnections/tags_gen.go +++ b/internal/service/codestarconnections/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets codestarconnections service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/codestarnotifications/tags_gen.go b/internal/service/codestarnotifications/tags_gen.go index 8826c3f113c..6c205063561 100644 --- a/internal/service/codestarnotifications/tags_gen.go +++ b/internal/service/codestarnotifications/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets codestarnotifications service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/cognitoidentity/tags_gen.go b/internal/service/cognitoidentity/tags_gen.go index 3a508555072..0d661cd5a4b 100644 --- a/internal/service/cognitoidentity/tags_gen.go +++ b/internal/service/cognitoidentity/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets cognitoidentity service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/cognitoidp/tags_gen.go b/internal/service/cognitoidp/tags_gen.go index 820dbc4d013..7f201d4ac34 100644 --- a/internal/service/cognitoidp/tags_gen.go +++ b/internal/service/cognitoidp/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets cognitoidp service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/comprehend/tags_gen.go b/internal/service/comprehend/tags_gen.go index 3d33f7eef29..1770b76e9e6 100644 --- a/internal/service/comprehend/tags_gen.go +++ b/internal/service/comprehend/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets comprehend service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/configservice/tags_gen.go b/internal/service/configservice/tags_gen.go index 77c931636de..e1664a530fd 100644 --- a/internal/service/configservice/tags_gen.go +++ b/internal/service/configservice/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*configservice.Tag { // setTagsOut sets configservice service tags in Context. 
func setTagsOut(ctx context.Context, tags []*configservice.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/connect/tags_gen.go b/internal/service/connect/tags_gen.go index cafb349e942..8a232833f5d 100644 --- a/internal/service/connect/tags_gen.go +++ b/internal/service/connect/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets connect service tags in Context. func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/customerprofiles/tags_gen.go b/internal/service/customerprofiles/tags_gen.go index f6e90a98133..200a68268d5 100644 --- a/internal/service/customerprofiles/tags_gen.go +++ b/internal/service/customerprofiles/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := 
tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets customerprofiles service tags in Context. func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/dataexchange/tags_gen.go b/internal/service/dataexchange/tags_gen.go index d97349220b1..2b91bfbcf2e 100644 --- a/internal/service/dataexchange/tags_gen.go +++ b/internal/service/dataexchange/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets dataexchange service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/datapipeline/tags_gen.go b/internal/service/datapipeline/tags_gen.go index efa5ef6667e..efd17707caa 100644 --- a/internal/service/datapipeline/tags_gen.go +++ b/internal/service/datapipeline/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -60,7 +60,7 @@ func getTagsIn(ctx context.Context) []*datapipeline.Tag { // setTagsOut sets datapipeline service tags in Context. 
func setTagsOut(ctx context.Context, tags []*datapipeline.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/datasync/tags_gen.go b/internal/service/datasync/tags_gen.go index c970f250840..b761fcc9dc6 100644 --- a/internal/service/datasync/tags_gen.go +++ b/internal/service/datasync/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*datasync.TagListEntry { // setTagsOut sets datasync service tags in Context. 
func setTagsOut(ctx context.Context, tags []*datasync.TagListEntry) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/dax/tags_gen.go b/internal/service/dax/tags_gen.go index 35dfbc181d3..b591bcec4b4 100644 --- a/internal/service/dax/tags_gen.go +++ b/internal/service/dax/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*dax.Tag { // setTagsOut sets dax service tags in Context. 
func setTagsOut(ctx context.Context, tags []*dax.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/deploy/tags_gen.go b/internal/service/deploy/tags_gen.go index 792997529d0..50e2b60604a 100644 --- a/internal/service/deploy/tags_gen.go +++ b/internal/service/deploy/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets deploy service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/detective/tags_gen.go b/internal/service/detective/tags_gen.go index aa55884f016..07123082e85 100644 --- a/internal/service/detective/tags_gen.go +++ b/internal/service/detective/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets detective service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/devicefarm/tags_gen.go b/internal/service/devicefarm/tags_gen.go index f3c00bf3d35..377920ef91a 100644 --- a/internal/service/devicefarm/tags_gen.go +++ b/internal/service/devicefarm/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*devicefarm.Tag { // setTagsOut sets devicefarm service tags in Context. 
func setTagsOut(ctx context.Context, tags []*devicefarm.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/directconnect/tags_gen.go b/internal/service/directconnect/tags_gen.go index 708a0671e74..36eff3c345e 100644 --- a/internal/service/directconnect/tags_gen.go +++ b/internal/service/directconnect/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*directconnect.Tag { // setTagsOut sets directconnect service tags in Context. 
func setTagsOut(ctx context.Context, tags []*directconnect.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/dlm/tags_gen.go b/internal/service/dlm/tags_gen.go index a033b675e32..a9b30d13b2c 100644 --- a/internal/service/dlm/tags_gen.go +++ b/internal/service/dlm/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets dlm service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/dms/tags_gen.go b/internal/service/dms/tags_gen.go index 887a3a40432..48393cf0070 100644 --- a/internal/service/dms/tags_gen.go +++ b/internal/service/dms/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*databasemigrationservice.Tag { // setTagsOut sets dms service tags in Context. 
func setTagsOut(ctx context.Context, tags []*databasemigrationservice.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/docdb/tags_gen.go b/internal/service/docdb/tags_gen.go index 5375f9c34b7..3e538b3f872 100644 --- a/internal/service/docdb/tags_gen.go +++ b/internal/service/docdb/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*docdb.Tag { // setTagsOut sets docdb service tags in Context. 
func setTagsOut(ctx context.Context, tags []*docdb.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/docdbelastic/tags_gen.go b/internal/service/docdbelastic/tags_gen.go index b8743e2c6fa..90eeda314c2 100644 --- a/internal/service/docdbelastic/tags_gen.go +++ b/internal/service/docdbelastic/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets docdbelastic service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/ds/tags_gen.go b/internal/service/ds/tags_gen.go index e01d9e361a5..6d5db3537d8 100644 --- a/internal/service/ds/tags_gen.go +++ b/internal/service/ds/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*directoryservice.Tag { // setTagsOut sets ds service tags in Context. 
func setTagsOut(ctx context.Context, tags []*directoryservice.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/dynamodb/tags_gen.go b/internal/service/dynamodb/tags_gen.go index 5945d531a9b..5025ed60632 100644 --- a/internal/service/dynamodb/tags_gen.go +++ b/internal/service/dynamodb/tags_gen.go @@ -15,7 +15,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -72,7 +72,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -122,7 +122,7 @@ func getTagsIn(ctx context.Context) []*dynamodb.Tag { // setTagsOut sets dynamodb service tags in Context. 
func setTagsOut(ctx context.Context, tags []*dynamodb.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/ec2/tags_gen.go b/internal/service/ec2/tags_gen.go index 089c3d15384..601d2db6a9d 100644 --- a/internal/service/ec2/tags_gen.go +++ b/internal/service/ec2/tags_gen.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -83,7 +83,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -150,7 +150,7 @@ func getTagsIn(ctx context.Context) []*ec2.Tag { // setTagsOut sets ec2 service tags in Context. 
func setTagsOut(ctx context.Context, tags any) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/ec2/tagsv2_gen.go b/internal/service/ec2/tagsv2_gen.go index 8c6b3dd1f1a..77d95558129 100644 --- a/internal/service/ec2/tagsv2_gen.go +++ b/internal/service/ec2/tagsv2_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -59,7 +59,7 @@ func getTagsInV2(ctx context.Context) []awstypes.Tag { // setTagsOutV2 sets ec2 service tags in Context. func setTagsOutV2(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(keyValueTagsV2(ctx, tags)) + inContext.TagsOut = option.Some(keyValueTagsV2(ctx, tags)) } } diff --git a/internal/service/ecr/tags_gen.go b/internal/service/ecr/tags_gen.go index 876c7730907..0c3bf816d13 100644 --- a/internal/service/ecr/tags_gen.go +++ b/internal/service/ecr/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = 
option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*ecr.Tag { // setTagsOut sets ecr service tags in Context. func setTagsOut(ctx context.Context, tags []*ecr.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/ecrpublic/tags_gen.go b/internal/service/ecrpublic/tags_gen.go index d6dda0b252b..d9d724e12cb 100644 --- a/internal/service/ecrpublic/tags_gen.go +++ b/internal/service/ecrpublic/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*ecrpublic.Tag { // setTagsOut sets ecrpublic service tags in Context. 
func setTagsOut(ctx context.Context, tags []*ecrpublic.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/ecs/tags_gen.go b/internal/service/ecs/tags_gen.go index b6f8903a0e4..0c7e9308729 100644 --- a/internal/service/ecs/tags_gen.go +++ b/internal/service/ecs/tags_gen.go @@ -15,7 +15,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -72,7 +72,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -122,7 +122,7 @@ func getTagsIn(ctx context.Context) []*ecs.Tag { // setTagsOut sets ecs service tags in Context. 
func setTagsOut(ctx context.Context, tags []*ecs.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/efs/tags_gen.go b/internal/service/efs/tags_gen.go index 5931c30e736..e17108f0cbf 100644 --- a/internal/service/efs/tags_gen.go +++ b/internal/service/efs/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*efs.Tag { // setTagsOut sets efs service tags in Context. 
func setTagsOut(ctx context.Context, tags []*efs.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/eks/tags_gen.go b/internal/service/eks/tags_gen.go index 785e78ad79d..e6553813726 100644 --- a/internal/service/eks/tags_gen.go +++ b/internal/service/eks/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets eks service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/elasticache/tags_gen.go b/internal/service/elasticache/tags_gen.go index 07b0c1b6127..51cf3d9882e 100644 --- a/internal/service/elasticache/tags_gen.go +++ b/internal/service/elasticache/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*elasticache.Tag { // setTagsOut sets elasticache service tags in Context. 
func setTagsOut(ctx context.Context, tags []*elasticache.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/elasticbeanstalk/tags_gen.go b/internal/service/elasticbeanstalk/tags_gen.go index e974c1f9e74..a506b699995 100644 --- a/internal/service/elasticbeanstalk/tags_gen.go +++ b/internal/service/elasticbeanstalk/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*elasticbeanstalk.Tag { // setTagsOut sets elasticbeanstalk service tags in Context. 
func setTagsOut(ctx context.Context, tags []*elasticbeanstalk.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/elasticsearch/tags_gen.go b/internal/service/elasticsearch/tags_gen.go index cc9182a1f3a..a4c0359a967 100644 --- a/internal/service/elasticsearch/tags_gen.go +++ b/internal/service/elasticsearch/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*elasticsearchservice.Tag { // setTagsOut sets elasticsearch service tags in Context. 
func setTagsOut(ctx context.Context, tags []*elasticsearchservice.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/elb/tags_gen.go b/internal/service/elb/tags_gen.go index eca6a22938c..57d0b4e9a54 100644 --- a/internal/service/elb/tags_gen.go +++ b/internal/service/elb/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -108,7 +108,7 @@ func getTagsIn(ctx context.Context) []*elb.Tag { // setTagsOut sets elb service tags in Context. 
func setTagsOut(ctx context.Context, tags []*elb.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/elbv2/tags_gen.go b/internal/service/elbv2/tags_gen.go index 64d1387a8b2..54e9a67ee48 100644 --- a/internal/service/elbv2/tags_gen.go +++ b/internal/service/elbv2/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*elbv2.Tag { // setTagsOut sets elbv2 service tags in Context. 
func setTagsOut(ctx context.Context, tags []*elbv2.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/emr/tags_gen.go b/internal/service/emr/tags_gen.go index 76d694046bf..73df2b5d6f4 100644 --- a/internal/service/emr/tags_gen.go +++ b/internal/service/emr/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -60,7 +60,7 @@ func getTagsIn(ctx context.Context) []*emr.Tag { // setTagsOut sets emr service tags in Context. func setTagsOut(ctx context.Context, tags []*emr.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/emrcontainers/tags_gen.go b/internal/service/emrcontainers/tags_gen.go index 28781419c8a..fe3aa0070f9 100644 --- a/internal/service/emrcontainers/tags_gen.go +++ b/internal/service/emrcontainers/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = 
types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets emrcontainers service tags in Context. func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/emrserverless/tags_gen.go b/internal/service/emrserverless/tags_gen.go index b13871f03f4..24e162216e0 100644 --- a/internal/service/emrserverless/tags_gen.go +++ b/internal/service/emrserverless/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets emrserverless service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/events/tags_gen.go b/internal/service/events/tags_gen.go index 25a666e566f..a8cc8785ab4 100644 --- a/internal/service/events/tags_gen.go +++ b/internal/service/events/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*eventbridge.Tag { // setTagsOut sets events service tags in Context. 
func setTagsOut(ctx context.Context, tags []*eventbridge.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/evidently/tags_gen.go b/internal/service/evidently/tags_gen.go index 9bee7f2d9e5..fa55734a171 100644 --- a/internal/service/evidently/tags_gen.go +++ b/internal/service/evidently/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets evidently service tags in Context. func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/finspace/tags_gen.go b/internal/service/finspace/tags_gen.go index 7a99feeab1c..8abb0deb456 100644 --- a/internal/service/finspace/tags_gen.go +++ b/internal/service/finspace/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - 
inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets finspace service tags in Context. func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/firehose/tags_gen.go b/internal/service/firehose/tags_gen.go index d16d6f61de8..806e3854ea3 100644 --- a/internal/service/firehose/tags_gen.go +++ b/internal/service/firehose/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*firehose.Tag { // setTagsOut sets firehose service tags in Context. 
func setTagsOut(ctx context.Context, tags []*firehose.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/fis/tags_gen.go b/internal/service/fis/tags_gen.go index bb3743ef03f..28293c4c25f 100644 --- a/internal/service/fis/tags_gen.go +++ b/internal/service/fis/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets fis service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/fms/tags_gen.go b/internal/service/fms/tags_gen.go index 740a23368c6..1fa9846a2c6 100644 --- a/internal/service/fms/tags_gen.go +++ b/internal/service/fms/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*fms.Tag { // setTagsOut sets fms service tags in Context. 
func setTagsOut(ctx context.Context, tags []*fms.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/fsx/tags_gen.go b/internal/service/fsx/tags_gen.go index 0e1d9cdfe31..cdd98c088a7 100644 --- a/internal/service/fsx/tags_gen.go +++ b/internal/service/fsx/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*fsx.Tag { // setTagsOut sets fsx service tags in Context. 
func setTagsOut(ctx context.Context, tags []*fsx.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/gamelift/tags_gen.go b/internal/service/gamelift/tags_gen.go index a4a49ffb078..8c8a193c52b 100644 --- a/internal/service/gamelift/tags_gen.go +++ b/internal/service/gamelift/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*gamelift.Tag { // setTagsOut sets gamelift service tags in Context. 
func setTagsOut(ctx context.Context, tags []*gamelift.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/glacier/tags_gen.go b/internal/service/glacier/tags_gen.go index feacf0e8c6f..82f1ce5d4e2 100644 --- a/internal/service/glacier/tags_gen.go +++ b/internal/service/glacier/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets glacier service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/globalaccelerator/tags_gen.go b/internal/service/globalaccelerator/tags_gen.go index a388d484249..e9974334e9e 100644 --- a/internal/service/globalaccelerator/tags_gen.go +++ b/internal/service/globalaccelerator/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*globalaccelerator.Tag { // setTagsOut sets globalaccelerator service tags in Context. 
func setTagsOut(ctx context.Context, tags []*globalaccelerator.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/glue/tags_gen.go b/internal/service/glue/tags_gen.go index 6cce2f144e3..cfef903f15c 100644 --- a/internal/service/glue/tags_gen.go +++ b/internal/service/glue/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets glue service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/grafana/tags_gen.go b/internal/service/grafana/tags_gen.go index 28ced98ebb6..5dd8becdadf 100644 --- a/internal/service/grafana/tags_gen.go +++ b/internal/service/grafana/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets grafana service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/greengrass/tags_gen.go b/internal/service/greengrass/tags_gen.go index 656a2df5a50..76b1d773c02 100644 --- a/internal/service/greengrass/tags_gen.go +++ b/internal/service/greengrass/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets greengrass service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/guardduty/tags_gen.go b/internal/service/guardduty/tags_gen.go index 5688c67eb2d..004e9628cde 100644 --- a/internal/service/guardduty/tags_gen.go +++ b/internal/service/guardduty/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets guardduty service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/healthlake/tags_gen.go b/internal/service/healthlake/tags_gen.go index 58e89c1e591..1cf4cf78277 100644 --- a/internal/service/healthlake/tags_gen.go +++ b/internal/service/healthlake/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets healthlake service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/iam/tags_gen.go b/internal/service/iam/tags_gen.go index e3b900f95f7..1b56f3698e3 100644 --- a/internal/service/iam/tags_gen.go +++ b/internal/service/iam/tags_gen.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/iam" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" ) // []*SERVICE.Tag handling @@ -54,6 +54,6 @@ func getTagsIn(ctx context.Context) []*iam.Tag { // setTagsOut sets iam service tags in Context. func setTagsOut(ctx context.Context, tags []*iam.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/imagebuilder/tags_gen.go b/internal/service/imagebuilder/tags_gen.go index 4e8b539e48b..9c11e5da020 100644 --- a/internal/service/imagebuilder/tags_gen.go +++ b/internal/service/imagebuilder/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func 
getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets imagebuilder service tags in Context. func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/inspector/tags_gen.go b/internal/service/inspector/tags_gen.go index b4a675fe131..d4d7e73099d 100644 --- a/internal/service/inspector/tags_gen.go +++ b/internal/service/inspector/tags_gen.go @@ -9,7 +9,7 @@ import ( "github.com/aws/aws-sdk-go/service/inspector/inspectoriface" "github.com/hashicorp/terraform-provider-aws/internal/conns" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" ) // listTags lists inspector service tags. @@ -39,7 +39,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -89,6 +89,6 @@ func getTagsIn(ctx context.Context) []*inspector.Tag { // setTagsOut sets inspector service tags in Context. 
func setTagsOut(ctx context.Context, tags []*inspector.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/internetmonitor/tags_gen.go b/internal/service/internetmonitor/tags_gen.go index fa8fc1e7bee..1875efad246 100644 --- a/internal/service/internetmonitor/tags_gen.go +++ b/internal/service/internetmonitor/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets internetmonitor service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/iot/tags_gen.go b/internal/service/iot/tags_gen.go index ce6faf2d8fa..a6af1e4d3a1 100644 --- a/internal/service/iot/tags_gen.go +++ b/internal/service/iot/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*iot.Tag { // setTagsOut sets iot service tags in Context. 
func setTagsOut(ctx context.Context, tags []*iot.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/iotanalytics/tags_gen.go b/internal/service/iotanalytics/tags_gen.go index e501838950a..c4803733f28 100644 --- a/internal/service/iotanalytics/tags_gen.go +++ b/internal/service/iotanalytics/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*iotanalytics.Tag { // setTagsOut sets iotanalytics service tags in Context. 
func setTagsOut(ctx context.Context, tags []*iotanalytics.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/iotevents/tags_gen.go b/internal/service/iotevents/tags_gen.go index 2963909ed2e..1a6b7912d8b 100644 --- a/internal/service/iotevents/tags_gen.go +++ b/internal/service/iotevents/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*iotevents.Tag { // setTagsOut sets iotevents service tags in Context. 
func setTagsOut(ctx context.Context, tags []*iotevents.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/ivs/tags_gen.go b/internal/service/ivs/tags_gen.go index 257f99aab98..05b645c5056 100644 --- a/internal/service/ivs/tags_gen.go +++ b/internal/service/ivs/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets ivs service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/ivschat/tags_gen.go b/internal/service/ivschat/tags_gen.go index 3b7340fef26..ddea8c0ebbd 100644 --- a/internal/service/ivschat/tags_gen.go +++ b/internal/service/ivschat/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets ivschat service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/kafka/tags_gen.go b/internal/service/kafka/tags_gen.go index e9f1e24be20..66d97f1c1ba 100644 --- a/internal/service/kafka/tags_gen.go +++ b/internal/service/kafka/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets kafka service tags in Context. func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/kafka/tagsv2_gen.go b/internal/service/kafka/tagsv2_gen.go index 58b43137e13..3e59f4933cc 100644 --- a/internal/service/kafka/tagsv2_gen.go +++ b/internal/service/kafka/tagsv2_gen.go @@ -5,7 +5,7 @@ import ( "context" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" ) // map[string]string handling @@ -35,6 +35,6 @@ func getTagsInV2(ctx context.Context) map[string]string { // setTagsOutV2 sets kafka service tags in Context. 
func setTagsOutV2(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(keyValueTagsV2(ctx, tags)) + inContext.TagsOut = option.Some(keyValueTagsV2(ctx, tags)) } } diff --git a/internal/service/kendra/tags_gen.go b/internal/service/kendra/tags_gen.go index 1864dd5810e..bc22c64c859 100644 --- a/internal/service/kendra/tags_gen.go +++ b/internal/service/kendra/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets kendra service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/keyspaces/tags_gen.go b/internal/service/keyspaces/tags_gen.go index 738c63beae8..ae47d23a2bb 100644 --- a/internal/service/keyspaces/tags_gen.go +++ b/internal/service/keyspaces/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets keyspaces service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/kinesis/tags_gen.go b/internal/service/kinesis/tags_gen.go index d9731b3c5bd..00f1e48a9b2 100644 --- a/internal/service/kinesis/tags_gen.go +++ b/internal/service/kinesis/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*kinesis.Tag { // setTagsOut sets kinesis service tags in Context. 
func setTagsOut(ctx context.Context, tags []*kinesis.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/kinesisanalytics/tags_gen.go b/internal/service/kinesisanalytics/tags_gen.go index 74ad87f7168..76ff98ea485 100644 --- a/internal/service/kinesisanalytics/tags_gen.go +++ b/internal/service/kinesisanalytics/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*kinesisanalytics.Tag { // setTagsOut sets kinesisanalytics service tags in Context. 
func setTagsOut(ctx context.Context, tags []*kinesisanalytics.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/kinesisanalyticsv2/tags_gen.go b/internal/service/kinesisanalyticsv2/tags_gen.go index cdd12fd9809..7fc2bce5e65 100644 --- a/internal/service/kinesisanalyticsv2/tags_gen.go +++ b/internal/service/kinesisanalyticsv2/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*kinesisanalyticsv2.Tag { // setTagsOut sets kinesisanalyticsv2 service tags in Context. 
func setTagsOut(ctx context.Context, tags []*kinesisanalyticsv2.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/kinesisvideo/tags_gen.go b/internal/service/kinesisvideo/tags_gen.go index 1b9a12679f9..2ada6304244 100644 --- a/internal/service/kinesisvideo/tags_gen.go +++ b/internal/service/kinesisvideo/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets kinesisvideo service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/kms/tags_gen.go b/internal/service/kms/tags_gen.go index 110ab9100c1..ccf4e8243e9 100644 --- a/internal/service/kms/tags_gen.go +++ b/internal/service/kms/tags_gen.go @@ -16,7 +16,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -67,7 +67,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -117,7 +117,7 @@ func getTagsIn(ctx context.Context) []*kms.Tag { // setTagsOut sets kms service tags in Context. 
func setTagsOut(ctx context.Context, tags []*kms.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/lambda/tags_gen.go b/internal/service/lambda/tags_gen.go index af734162610..312aa516850 100644 --- a/internal/service/lambda/tags_gen.go +++ b/internal/service/lambda/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets lambda service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/lexv2models/tags_gen.go b/internal/service/lexv2models/tags_gen.go index 35eb09faf43..70d66945cc3 100644 --- a/internal/service/lexv2models/tags_gen.go +++ b/internal/service/lexv2models/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets lexv2models service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/licensemanager/tags_gen.go b/internal/service/licensemanager/tags_gen.go index 80726944546..9220395941f 100644 --- a/internal/service/licensemanager/tags_gen.go +++ b/internal/service/licensemanager/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -60,7 +60,7 @@ func getTagsIn(ctx context.Context) []*licensemanager.Tag { // setTagsOut sets licensemanager service tags in Context. 
func setTagsOut(ctx context.Context, tags []*licensemanager.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/lightsail/tags_gen.go b/internal/service/lightsail/tags_gen.go index 454766d7766..679f3344f24 100644 --- a/internal/service/lightsail/tags_gen.go +++ b/internal/service/lightsail/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -60,7 +60,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets lightsail service tags in Context. func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/location/tags_gen.go b/internal/service/location/tags_gen.go index da39ba34b68..3cf6c3b3744 100644 --- a/internal/service/location/tags_gen.go +++ b/internal/service/location/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - 
inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets location service tags in Context. func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/logs/tags_gen.go b/internal/service/logs/tags_gen.go index 2d7de5e09d9..8f73ef73ea1 100644 --- a/internal/service/logs/tags_gen.go +++ b/internal/service/logs/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets logs service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/lookoutmetrics/tags_gen.go b/internal/service/lookoutmetrics/tags_gen.go index 184805d77e0..4afde737009 100644 --- a/internal/service/lookoutmetrics/tags_gen.go +++ b/internal/service/lookoutmetrics/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets lookoutmetrics service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/macie2/tags_gen.go b/internal/service/macie2/tags_gen.go index 4585dd94808..1f5008af05e 100644 --- a/internal/service/macie2/tags_gen.go +++ b/internal/service/macie2/tags_gen.go @@ -6,7 +6,7 @@ import ( "github.com/aws/aws-sdk-go/aws" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" ) // map[string]*string handling @@ -36,6 +36,6 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets macie2 service tags in Context. func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/mediaconnect/tags_gen.go b/internal/service/mediaconnect/tags_gen.go index 83416cc498f..96f33f52719 100644 --- a/internal/service/mediaconnect/tags_gen.go +++ b/internal/service/mediaconnect/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func 
getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets mediaconnect service tags in Context. func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/mediaconvert/tags_gen.go b/internal/service/mediaconvert/tags_gen.go index 7c7db2d562d..71fde3560ac 100644 --- a/internal/service/mediaconvert/tags_gen.go +++ b/internal/service/mediaconvert/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets mediaconvert service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/medialive/tags_gen.go b/internal/service/medialive/tags_gen.go index 6960380a4a1..d2a1a66de42 100644 --- a/internal/service/medialive/tags_gen.go +++ b/internal/service/medialive/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets medialive service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/mediapackage/tags_gen.go b/internal/service/mediapackage/tags_gen.go index a9556ee351e..cac2b77b878 100644 --- a/internal/service/mediapackage/tags_gen.go +++ b/internal/service/mediapackage/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets mediapackage service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/mediastore/tags_gen.go b/internal/service/mediastore/tags_gen.go index dc0a0bb7824..b7095fc02ab 100644 --- a/internal/service/mediastore/tags_gen.go +++ b/internal/service/mediastore/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*mediastore.Tag { // setTagsOut sets mediastore service tags in Context. 
func setTagsOut(ctx context.Context, tags []*mediastore.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/memorydb/tags_gen.go b/internal/service/memorydb/tags_gen.go index b2b4b29a848..805f2ed38cc 100644 --- a/internal/service/memorydb/tags_gen.go +++ b/internal/service/memorydb/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*memorydb.Tag { // setTagsOut sets memorydb service tags in Context. 
func setTagsOut(ctx context.Context, tags []*memorydb.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/mq/tags_gen.go b/internal/service/mq/tags_gen.go index 4e8488c8a32..d890a723f62 100644 --- a/internal/service/mq/tags_gen.go +++ b/internal/service/mq/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets mq service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/mwaa/tags_gen.go b/internal/service/mwaa/tags_gen.go index 71263e1725e..5e79bd54039 100644 --- a/internal/service/mwaa/tags_gen.go +++ b/internal/service/mwaa/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets mwaa service tags in Context. func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/neptune/tags_gen.go b/internal/service/neptune/tags_gen.go index 618e547b6b7..b6ce3a03005 100644 --- a/internal/service/neptune/tags_gen.go +++ b/internal/service/neptune/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = 
types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*neptune.Tag { // setTagsOut sets neptune service tags in Context. func setTagsOut(ctx context.Context, tags []*neptune.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/networkfirewall/tags_gen.go b/internal/service/networkfirewall/tags_gen.go index f4211a4c0b0..7ef08be33ec 100644 --- a/internal/service/networkfirewall/tags_gen.go +++ b/internal/service/networkfirewall/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*networkfirewall.Tag { // setTagsOut sets networkfirewall service tags in Context. 
func setTagsOut(ctx context.Context, tags []*networkfirewall.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/networkmanager/tags_gen.go b/internal/service/networkmanager/tags_gen.go index a2c7b46b293..ce1d05dab75 100644 --- a/internal/service/networkmanager/tags_gen.go +++ b/internal/service/networkmanager/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -60,7 +60,7 @@ func getTagsIn(ctx context.Context) []*networkmanager.Tag { // setTagsOut sets networkmanager service tags in Context. 
func setTagsOut(ctx context.Context, tags []*networkmanager.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/oam/tags_gen.go b/internal/service/oam/tags_gen.go index e579d67a857..da14c0abf5e 100644 --- a/internal/service/oam/tags_gen.go +++ b/internal/service/oam/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets oam service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/opensearch/tags_gen.go b/internal/service/opensearch/tags_gen.go index 108032ec451..0a0fdf94e41 100644 --- a/internal/service/opensearch/tags_gen.go +++ b/internal/service/opensearch/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*opensearchservice.Tag { // setTagsOut sets opensearch service tags in Context. 
func setTagsOut(ctx context.Context, tags []*opensearchservice.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/opensearchserverless/tags_gen.go b/internal/service/opensearchserverless/tags_gen.go index 52d0cfe0521..0959339651a 100644 --- a/internal/service/opensearchserverless/tags_gen.go +++ b/internal/service/opensearchserverless/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets opensearchserverless service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/opsworks/tags_gen.go b/internal/service/opsworks/tags_gen.go index 61e61f6e89a..1e17d009ffb 100644 --- a/internal/service/opsworks/tags_gen.go +++ b/internal/service/opsworks/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets opsworks service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/organizations/tags_gen.go b/internal/service/organizations/tags_gen.go index bcd9d92a896..7bfa448b963 100644 --- a/internal/service/organizations/tags_gen.go +++ b/internal/service/organizations/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*organizations.Tag { // setTagsOut sets organizations service tags in Context. 
func setTagsOut(ctx context.Context, tags []*organizations.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/outposts/tags_gen.go b/internal/service/outposts/tags_gen.go index c7a2e0f8e7c..67620daa79d 100644 --- a/internal/service/outposts/tags_gen.go +++ b/internal/service/outposts/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets outposts service tags in Context. func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/pinpoint/tags_gen.go b/internal/service/pinpoint/tags_gen.go index e79decfc135..a3761746b5d 100644 --- a/internal/service/pinpoint/tags_gen.go +++ b/internal/service/pinpoint/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - 
inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets pinpoint service tags in Context. func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/pipes/tags_gen.go b/internal/service/pipes/tags_gen.go index 65dde68db86..d1b1a2d7129 100644 --- a/internal/service/pipes/tags_gen.go +++ b/internal/service/pipes/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets pipes service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/qldb/tags_gen.go b/internal/service/qldb/tags_gen.go index cf5d6e3f1e6..494cf058b79 100644 --- a/internal/service/qldb/tags_gen.go +++ b/internal/service/qldb/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets qldb service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/quicksight/tags_gen.go b/internal/service/quicksight/tags_gen.go index d66c2caf3f0..a1f9bc0f683 100644 --- a/internal/service/quicksight/tags_gen.go +++ b/internal/service/quicksight/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*quicksight.Tag { // setTagsOut sets quicksight service tags in Context. 
func setTagsOut(ctx context.Context, tags []*quicksight.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/ram/tags_gen.go b/internal/service/ram/tags_gen.go index 57bc470e769..15f5a63a5ef 100644 --- a/internal/service/ram/tags_gen.go +++ b/internal/service/ram/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -60,7 +60,7 @@ func getTagsIn(ctx context.Context) []*ram.Tag { // setTagsOut sets ram service tags in Context. func setTagsOut(ctx context.Context, tags []*ram.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/rbin/tags_gen.go b/internal/service/rbin/tags_gen.go index 66827ce6226..e028108f6f2 100644 --- a/internal/service/rbin/tags_gen.go +++ b/internal/service/rbin/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = 
option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets rbin service tags in Context. func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/rds/tags_gen.go b/internal/service/rds/tags_gen.go index 47bbb08841e..9fb5dde2fa2 100644 --- a/internal/service/rds/tags_gen.go +++ b/internal/service/rds/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*rds.Tag { // setTagsOut sets rds service tags in Context. 
func setTagsOut(ctx context.Context, tags []*rds.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/redshift/tags_gen.go b/internal/service/redshift/tags_gen.go index 248b6a6bf09..bd69ee3134b 100644 --- a/internal/service/redshift/tags_gen.go +++ b/internal/service/redshift/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -60,7 +60,7 @@ func getTagsIn(ctx context.Context) []*redshift.Tag { // setTagsOut sets redshift service tags in Context. func setTagsOut(ctx context.Context, tags []*redshift.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/redshiftserverless/tags_gen.go b/internal/service/redshiftserverless/tags_gen.go index 7c94bf8414a..89fce014c35 100644 --- a/internal/service/redshiftserverless/tags_gen.go +++ b/internal/service/redshiftserverless/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := 
tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*redshiftserverless.Tag { // setTagsOut sets redshiftserverless service tags in Context. func setTagsOut(ctx context.Context, tags []*redshiftserverless.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/resourceexplorer2/tags_gen.go b/internal/service/resourceexplorer2/tags_gen.go index e5f83d67fbf..4ba431dba66 100644 --- a/internal/service/resourceexplorer2/tags_gen.go +++ b/internal/service/resourceexplorer2/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets resourceexplorer2 service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/resourcegroups/tags_gen.go b/internal/service/resourcegroups/tags_gen.go index a682271d1f9..286ba75c1e3 100644 --- a/internal/service/resourcegroups/tags_gen.go +++ b/internal/service/resourcegroups/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets resourcegroups service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/resourcegroupstaggingapi/tags_gen.go b/internal/service/resourcegroupstaggingapi/tags_gen.go index 7d1dc4079b4..1fe9ef63d19 100644 --- a/internal/service/resourcegroupstaggingapi/tags_gen.go +++ b/internal/service/resourcegroupstaggingapi/tags_gen.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" awstypes "github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi/types" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" ) // []*SERVICE.Tag handling @@ -54,6 +54,6 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets resourcegroupstaggingapi service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/rolesanywhere/tags_gen.go b/internal/service/rolesanywhere/tags_gen.go index d180e76d074..752563029cf 100644 --- a/internal/service/rolesanywhere/tags_gen.go +++ b/internal/service/rolesanywhere/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets rolesanywhere service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/route53/tags_gen.go b/internal/service/route53/tags_gen.go index a1f618838da..02c0db9fdb7 100644 --- a/internal/service/route53/tags_gen.go +++ b/internal/service/route53/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -44,7 +44,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier, res } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -94,7 +94,7 @@ func getTagsIn(ctx context.Context) []*route53.Tag { // setTagsOut sets route53 service tags in Context. 
func setTagsOut(ctx context.Context, tags []*route53.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/route53domains/tags_gen.go b/internal/service/route53domains/tags_gen.go index ceacca9e6d1..e1dc646adcc 100644 --- a/internal/service/route53domains/tags_gen.go +++ b/internal/service/route53domains/tags_gen.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -63,7 +63,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -113,7 +113,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets route53domains service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/route53recoveryreadiness/tags_gen.go b/internal/service/route53recoveryreadiness/tags_gen.go index 6ac6929e2b9..59b10fda97a 100644 --- a/internal/service/route53recoveryreadiness/tags_gen.go +++ b/internal/service/route53recoveryreadiness/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets route53recoveryreadiness service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/route53resolver/tags_gen.go b/internal/service/route53resolver/tags_gen.go index 6938109922a..cf64400aa8f 100644 --- a/internal/service/route53resolver/tags_gen.go +++ b/internal/service/route53resolver/tags_gen.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -63,7 +63,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -113,7 +113,7 @@ func getTagsIn(ctx context.Context) []*route53resolver.Tag { // setTagsOut sets route53resolver service tags in Context. 
func setTagsOut(ctx context.Context, tags []*route53resolver.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/rum/tags_gen.go b/internal/service/rum/tags_gen.go index 6cef377156a..b6a4fef6f68 100644 --- a/internal/service/rum/tags_gen.go +++ b/internal/service/rum/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets rum service tags in Context. func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/s3/tags_gen.go b/internal/service/s3/tags_gen.go index 4b1b0a5930f..a0af77904cf 100644 --- a/internal/service/s3/tags_gen.go +++ b/internal/service/s3/tags_gen.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/s3" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" ) // []*SERVICE.Tag handling @@ -54,6 +54,6 @@ func getTagsIn(ctx context.Context) []*s3.Tag { // setTagsOut sets s3 service tags in Context. 
func setTagsOut(ctx context.Context, tags []*s3.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/s3/tagsv2_gen.go b/internal/service/s3/tagsv2_gen.go index 10215b864dd..afebf66c6f9 100644 --- a/internal/service/s3/tagsv2_gen.go +++ b/internal/service/s3/tagsv2_gen.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" awstypes "github.com/aws/aws-sdk-go-v2/service/s3/types" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" ) // []*SERVICE.Tag handling @@ -54,6 +54,6 @@ func getTagsInV2(ctx context.Context) []awstypes.Tag { // setTagsOutV2 sets s3 service tags in Context. func setTagsOutV2(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(keyValueTagsV2(ctx, tags)) + inContext.TagsOut = option.Some(keyValueTagsV2(ctx, tags)) } } diff --git a/internal/service/s3control/tags_gen.go b/internal/service/s3control/tags_gen.go index 6f7ad5ae734..73f609196b2 100644 --- a/internal/service/s3control/tags_gen.go +++ b/internal/service/s3control/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -44,7 +44,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier, res } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return 
nil @@ -94,7 +94,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets s3control service tags in Context. func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/s3control/tagss3_gen.go b/internal/service/s3control/tagss3_gen.go index 392c1a96c75..fd263478cea 100644 --- a/internal/service/s3control/tagss3_gen.go +++ b/internal/service/s3control/tagss3_gen.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" awstypes "github.com/aws/aws-sdk-go-v2/service/s3control/types" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" ) // []*SERVICE.Tag handling @@ -54,6 +54,6 @@ func getTagsInS3(ctx context.Context) []awstypes.S3Tag { // setTagsOutS3 sets s3control service tags in Context. 
func setTagsOutS3(ctx context.Context, tags []awstypes.S3Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(keyValueTagsS3(ctx, tags)) + inContext.TagsOut = option.Some(keyValueTagsS3(ctx, tags)) } } diff --git a/internal/service/sagemaker/tags_gen.go b/internal/service/sagemaker/tags_gen.go index a3bebc7668e..fe4c6376f45 100644 --- a/internal/service/sagemaker/tags_gen.go +++ b/internal/service/sagemaker/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*sagemaker.Tag { // setTagsOut sets sagemaker service tags in Context. 
func setTagsOut(ctx context.Context, tags []*sagemaker.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/scheduler/tags_gen.go b/internal/service/scheduler/tags_gen.go index 37e29093065..875f085cc85 100644 --- a/internal/service/scheduler/tags_gen.go +++ b/internal/service/scheduler/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets scheduler service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/schemas/tags_gen.go b/internal/service/schemas/tags_gen.go index 1206225447e..3e3950f2c0c 100644 --- a/internal/service/schemas/tags_gen.go +++ b/internal/service/schemas/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets schemas service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/secretsmanager/tags_gen.go b/internal/service/secretsmanager/tags_gen.go index 005ec0242fe..5ebd56f1245 100644 --- a/internal/service/secretsmanager/tags_gen.go +++ b/internal/service/secretsmanager/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -60,7 +60,7 @@ func getTagsIn(ctx context.Context) []*secretsmanager.Tag { // setTagsOut sets secretsmanager service tags in Context. 
func setTagsOut(ctx context.Context, tags []*secretsmanager.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/securityhub/tags_gen.go b/internal/service/securityhub/tags_gen.go index 91434191991..ac8e8d70936 100644 --- a/internal/service/securityhub/tags_gen.go +++ b/internal/service/securityhub/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets securityhub service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/securitylake/tags_gen.go b/internal/service/securitylake/tags_gen.go index 9373d7c9c84..b93124387c8 100644 --- a/internal/service/securitylake/tags_gen.go +++ b/internal/service/securitylake/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets securitylake service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/serverlessrepo/tags_gen.go b/internal/service/serverlessrepo/tags_gen.go index 109d9bd9882..57f84930475 100644 --- a/internal/service/serverlessrepo/tags_gen.go +++ b/internal/service/serverlessrepo/tags_gen.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/serverlessapplicationrepository" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" ) // []*SERVICE.Tag handling @@ -54,6 +54,6 @@ func getTagsIn(ctx context.Context) []*serverlessapplicationrepository.Tag { // setTagsOut sets serverlessrepo service tags in Context. func setTagsOut(ctx context.Context, tags []*serverlessapplicationrepository.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/servicecatalog/tags_gen.go b/internal/service/servicecatalog/tags_gen.go index 92386011e0c..733146abd1f 100644 --- a/internal/service/servicecatalog/tags_gen.go +++ b/internal/service/servicecatalog/tags_gen.go @@ -7,7 +7,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/servicecatalog" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" ) // []*SERVICE.Tag handling @@ -54,6 +54,6 @@ func getTagsIn(ctx context.Context) []*servicecatalog.Tag { // setTagsOut sets servicecatalog service tags in Context. 
func setTagsOut(ctx context.Context, tags []*servicecatalog.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/servicediscovery/tags_gen.go b/internal/service/servicediscovery/tags_gen.go index efcb4e2e37c..77e4e8e7d17 100644 --- a/internal/service/servicediscovery/tags_gen.go +++ b/internal/service/servicediscovery/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*servicediscovery.Tag { // setTagsOut sets servicediscovery service tags in Context. 
func setTagsOut(ctx context.Context, tags []*servicediscovery.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/sesv2/tags_gen.go b/internal/service/sesv2/tags_gen.go index b009f7b8e41..0c92211ca23 100644 --- a/internal/service/sesv2/tags_gen.go +++ b/internal/service/sesv2/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets sesv2 service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/sfn/tags_gen.go b/internal/service/sfn/tags_gen.go index b5baaf5fd8e..e05a11c1277 100644 --- a/internal/service/sfn/tags_gen.go +++ b/internal/service/sfn/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*sfn.Tag { // setTagsOut sets sfn service tags in Context. 
func setTagsOut(ctx context.Context, tags []*sfn.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/shield/tags_gen.go b/internal/service/shield/tags_gen.go index ef63675ea32..9c979fcdead 100644 --- a/internal/service/shield/tags_gen.go +++ b/internal/service/shield/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*shield.Tag { // setTagsOut sets shield service tags in Context. 
func setTagsOut(ctx context.Context, tags []*shield.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/signer/tags_gen.go b/internal/service/signer/tags_gen.go index 597850260c6..ef0b5513900 100644 --- a/internal/service/signer/tags_gen.go +++ b/internal/service/signer/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets signer service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/sns/tags_gen.go b/internal/service/sns/tags_gen.go index 4babb3b995b..c8e34e4324a 100644 --- a/internal/service/sns/tags_gen.go +++ b/internal/service/sns/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets sns service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/sqs/tags_gen.go b/internal/service/sqs/tags_gen.go index d334498a810..7545ba066bc 100644 --- a/internal/service/sqs/tags_gen.go +++ b/internal/service/sqs/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets sqs service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/ssm/tags_gen.go b/internal/service/ssm/tags_gen.go index 3a27c7ef308..bb532d2fdb8 100644 --- a/internal/service/ssm/tags_gen.go +++ b/internal/service/ssm/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -44,7 +44,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier, res } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -94,7 +94,7 @@ func getTagsIn(ctx context.Context) []*ssm.Tag { // setTagsOut sets ssm service tags in Context. 
func setTagsOut(ctx context.Context, tags []*ssm.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/ssmcontacts/tags_gen.go b/internal/service/ssmcontacts/tags_gen.go index aee2da12a99..65264291891 100644 --- a/internal/service/ssmcontacts/tags_gen.go +++ b/internal/service/ssmcontacts/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets ssmcontacts service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/ssmincidents/tags_gen.go b/internal/service/ssmincidents/tags_gen.go index 14bd132c95c..bacb2daa8d8 100644 --- a/internal/service/ssmincidents/tags_gen.go +++ b/internal/service/ssmincidents/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets ssmincidents service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/ssoadmin/tags_gen.go b/internal/service/ssoadmin/tags_gen.go index 08a4c71d810..52af68718a0 100644 --- a/internal/service/ssoadmin/tags_gen.go +++ b/internal/service/ssoadmin/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -44,7 +44,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier, res } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -94,7 +94,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets ssoadmin service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/storagegateway/tags_gen.go b/internal/service/storagegateway/tags_gen.go index d92eba0c188..6b49703edee 100644 --- a/internal/service/storagegateway/tags_gen.go +++ b/internal/service/storagegateway/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*storagegateway.Tag { // setTagsOut sets storagegateway service tags in Context. 
func setTagsOut(ctx context.Context, tags []*storagegateway.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/swf/tags_gen.go b/internal/service/swf/tags_gen.go index 108b89f699b..3f7a01a3fc7 100644 --- a/internal/service/swf/tags_gen.go +++ b/internal/service/swf/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.ResourceTag { // setTagsOut sets swf service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.ResourceTag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/synthetics/tags_gen.go b/internal/service/synthetics/tags_gen.go index 509a6d11d19..70300311969 100644 --- a/internal/service/synthetics/tags_gen.go +++ b/internal/service/synthetics/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets synthetics service tags in Context. func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/timestreamwrite/tags_gen.go b/internal/service/timestreamwrite/tags_gen.go index 5e9bd1f8571..b42e144cd95 100644 --- a/internal/service/timestreamwrite/tags_gen.go +++ b/internal/service/timestreamwrite/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, 
ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets timestreamwrite service tags in Context. func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/transcribe/tags_gen.go b/internal/service/transcribe/tags_gen.go index b8c34ab5612..c35ebcb99ae 100644 --- a/internal/service/transcribe/tags_gen.go +++ b/internal/service/transcribe/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets transcribe service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/transfer/tags_gen.go b/internal/service/transfer/tags_gen.go index bc053b180e4..e243b288652 100644 --- a/internal/service/transfer/tags_gen.go +++ b/internal/service/transfer/tags_gen.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -63,7 +63,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -113,7 +113,7 @@ func getTagsIn(ctx context.Context) []*transfer.Tag { // setTagsOut sets transfer service tags in Context. 
func setTagsOut(ctx context.Context, tags []*transfer.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/vpclattice/tags_gen.go b/internal/service/vpclattice/tags_gen.go index 4664ccc439c..43af4aaa0cf 100644 --- a/internal/service/vpclattice/tags_gen.go +++ b/internal/service/vpclattice/tags_gen.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -42,7 +42,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -75,7 +75,7 @@ func getTagsIn(ctx context.Context) map[string]string { // setTagsOut sets vpclattice service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/waf/tags_gen.go b/internal/service/waf/tags_gen.go index 32d8eaa151c..6e1b02a20de 100644 --- a/internal/service/waf/tags_gen.go +++ b/internal/service/waf/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*waf.Tag { // setTagsOut sets waf service tags in Context. 
func setTagsOut(ctx context.Context, tags []*waf.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/wafregional/tags_gen.go b/internal/service/wafregional/tags_gen.go index b38eb8154b5..768e76acd8f 100644 --- a/internal/service/wafregional/tags_gen.go +++ b/internal/service/wafregional/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*waf.Tag { // setTagsOut sets wafregional service tags in Context. 
func setTagsOut(ctx context.Context, tags []*waf.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/wafv2/tags_gen.go b/internal/service/wafv2/tags_gen.go index 1f097716177..53127a9d190 100644 --- a/internal/service/wafv2/tags_gen.go +++ b/internal/service/wafv2/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []*wafv2.Tag { // setTagsOut sets wafv2 service tags in Context. 
func setTagsOut(ctx context.Context, tags []*wafv2.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/worklink/tags_gen.go b/internal/service/worklink/tags_gen.go index e7ccf2d6064..a1d41f474ce 100644 --- a/internal/service/worklink/tags_gen.go +++ b/internal/service/worklink/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -76,7 +76,7 @@ func getTagsIn(ctx context.Context) map[string]*string { // setTagsOut sets worklink service tags in Context. 
func setTagsOut(ctx context.Context, tags map[string]*string) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/workspaces/tags_gen.go b/internal/service/workspaces/tags_gen.go index abd954b396c..c06abd55fd2 100644 --- a/internal/service/workspaces/tags_gen.go +++ b/internal/service/workspaces/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets workspaces service tags in Context. 
func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } diff --git a/internal/service/xray/tags_gen.go b/internal/service/xray/tags_gen.go index 65818fa03e1..338efc55932 100644 --- a/internal/service/xray/tags_gen.go +++ b/internal/service/xray/tags_gen.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/logging" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -43,7 +43,7 @@ func (p *servicePackage) ListTags(ctx context.Context, meta any, identifier stri } if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return nil @@ -93,7 +93,7 @@ func getTagsIn(ctx context.Context) []awstypes.Tag { // setTagsOut sets xray service tags in Context. func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = option.Some(KeyValueTags(ctx, tags)) } } From 5060bd23f6ac1f888f6d6d4cf42423f3f57c676d Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 16:43:54 -0500 Subject: [PATCH 275/438] provider: 'internal/option' -> 'internal/types/option'. 
--- internal/provider/fwprovider/intercept.go | 5 +++-- internal/provider/intercept.go | 5 +++-- internal/provider/tags_interceptor_test.go | 3 ++- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/internal/provider/fwprovider/intercept.go b/internal/provider/fwprovider/intercept.go index ef97b51c230..3c93454fb2a 100644 --- a/internal/provider/fwprovider/intercept.go +++ b/internal/provider/fwprovider/intercept.go @@ -18,6 +18,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -348,7 +349,7 @@ func (r tagsResourceInterceptor) create(ctx context.Context, request resource.Cr // Remove system tags. tags = tags.IgnoreSystem(inContext.ServicePackageName) - tagsInContext.TagsIn = types.Some(tags) + tagsInContext.TagsIn = option.Some(tags) case After: // Set values for unknowns. // Remove any provider configured ignore_tags and system tags from those passed to the service API. @@ -513,7 +514,7 @@ func (r tagsResourceInterceptor) update(ctx context.Context, request resource.Up // Remove system tags. 
tags = tags.IgnoreSystem(inContext.ServicePackageName) - tagsInContext.TagsIn = types.Some(tags) + tagsInContext.TagsIn = option.Some(tags) var oldTagsAll, newTagsAll fwtypes.Map diff --git a/internal/provider/intercept.go b/internal/provider/intercept.go index f73190ce169..7a5ad365f42 100644 --- a/internal/provider/intercept.go +++ b/internal/provider/intercept.go @@ -17,6 +17,7 @@ import ( tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" "github.com/hashicorp/terraform-provider-aws/names" ) @@ -241,7 +242,7 @@ func (r tagsResourceInterceptor) run(ctx context.Context, d schemaResourceData, // Remove system tags. tags = tags.IgnoreSystem(inContext.ServicePackageName) - tagsInContext.TagsIn = types.Some(tags) + tagsInContext.TagsIn = option.Some(tags) if why == Create { break @@ -413,7 +414,7 @@ func (r tagsDataSourceInterceptor) run(ctx context.Context, d schemaResourceData case Read: // Get the data source's configured tags. tags := tftags.New(ctx, d.Get(names.AttrTags).(map[string]interface{})) - tagsInContext.TagsIn = types.Some(tags) + tagsInContext.TagsIn = option.Some(tags) } case After: // Set tags and tags_all in state after CRU. 
diff --git a/internal/provider/tags_interceptor_test.go b/internal/provider/tags_interceptor_test.go index feb3bccb56b..c021fe9c056 100644 --- a/internal/provider/tags_interceptor_test.go +++ b/internal/provider/tags_interceptor_test.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/types" + "github.com/hashicorp/terraform-provider-aws/internal/types/option" ) type mockService struct{} @@ -42,7 +43,7 @@ func (t *mockService) ListTags(ctx context.Context, meta any, identifier string) "tag1": "value1", }) if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(tags) + inContext.TagsOut = option.Some(tags) } return errors.New("test error") From 4dcf39cabb2df61e0527a1b9db29fdfbfa867a1b Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 16:54:00 -0500 Subject: [PATCH 276/438] r/aws_lb_target_group_attachment: Tidy up Read. 
--- .../service/elbv2/target_group_attachment.go | 101 ++++++++++-------- 1 file changed, 57 insertions(+), 44 deletions(-) diff --git a/internal/service/elbv2/target_group_attachment.go b/internal/service/elbv2/target_group_attachment.go index 429a6725277..30be210e8df 100644 --- a/internal/service/elbv2/target_group_attachment.go +++ b/internal/service/elbv2/target_group_attachment.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" @@ -92,66 +93,47 @@ func resourceAttachmentRead(ctx context.Context, d *schema.ResourceData, meta in var diags diag.Diagnostics conn := meta.(*conns.AWSClient).ELBV2Conn(ctx) - target := &elbv2.TargetDescription{ - Id: aws.String(d.Get("target_id").(string)), - } - - if v, ok := d.GetOk("port"); ok { - target.Port = aws.Int64(int64(v.(int))) + targetGroupARN := d.Get("target_group_arn").(string) + input := &elbv2.DescribeTargetHealthInput{ + TargetGroupArn: aws.String(targetGroupARN), + Targets: []*elbv2.TargetDescription{{ + Id: aws.String(d.Get("target_id").(string)), + }}, } if v, ok := d.GetOk("availability_zone"); ok { - target.AvailabilityZone = aws.String(v.(string)) + input.Targets[0].AvailabilityZone = aws.String(v.(string)) } - resp, err := conn.DescribeTargetHealthWithContext(ctx, &elbv2.DescribeTargetHealthInput{ - TargetGroupArn: aws.String(d.Get("target_group_arn").(string)), - Targets: []*elbv2.TargetDescription{target}, - }) - - if err != nil { - if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeTargetGroupNotFoundException) { - log.Printf("[WARN] Target group does not exist, removing target attachment %s", d.Id()) - d.SetId("") - 
return diags - } - if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeInvalidTargetException) { - log.Printf("[WARN] Target does not exist, removing target attachment %s", d.Id()) - d.SetId("") - return diags - } - return sdkdiag.AppendErrorf(diags, "reading Target Health: %s", err) + if v, ok := d.GetOk("port"); ok { + input.Targets[0].Port = aws.Int64(int64(v.(int))) } - for _, targetDesc := range resp.TargetHealthDescriptions { - if targetDesc == nil || targetDesc.Target == nil { - continue - } + output, err := FindTargetHealthDescription(ctx, conn, input) - if aws.StringValue(targetDesc.Target.Id) == d.Get("target_id").(string) { - // These will catch targets being removed by hand (draining as we plan) or that have been removed for a while - // without trying to re-create ones that are just not in use. For example, a target can be `unused` if the - // target group isnt assigned to anything, a scenario where we don't want to continuously recreate the resource. - if targetDesc.TargetHealth == nil { - continue - } + if !d.IsNewResource() && tfresource.NotFound(err) { + log.Printf("[WARN] ELBv2 Target Group Attachment %s not found, removing from state", d.Id()) + d.SetId("") + return diags + } - reason := aws.StringValue(targetDesc.TargetHealth.Reason) + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading ELBv2 Target Group Attachment (%s): %s", d.Id(), err) + } - if reason == elbv2.TargetHealthReasonEnumTargetNotRegistered || reason == elbv2.TargetHealthReasonEnumTargetDeregistrationInProgress { - log.Printf("[WARN] Target Attachment does not exist, recreating attachment") + // This will catch targets being removed by hand (draining as we plan) or that have been removed for a while + // without trying to re-create ones that are just not in use. For example, a target can be `unused` if the + // target group isnt assigned to anything, a scenario where we don't want to continuously recreate the resource. 
+ if v := output.TargetHealth; v != nil { + if reason := aws.StringValue(v.Reason); reason == elbv2.TargetHealthReasonEnumTargetNotRegistered || reason == elbv2.TargetHealthReasonEnumTargetDeregistrationInProgress { + if !d.IsNewResource() { + log.Printf("[WARN] ELBv2 Target Group Attachment %s not found, removing from state", d.Id()) d.SetId("") return diags } } } - if len(resp.TargetHealthDescriptions) != 1 { - log.Printf("[WARN] Target does not exist, removing target attachment %s", d.Id()) - d.SetId("") - return diags - } - return diags } @@ -188,3 +170,34 @@ func resourceAttachmentDelete(ctx context.Context, d *schema.ResourceData, meta return diags } + +func FindTargetHealthDescription(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTargetHealthInput) (*elbv2.TargetHealthDescription, error) { + output, err := findTargetHealthDescriptions(ctx, conn, input) + + if err != nil { + return nil, err + } + + return tfresource.AssertSinglePtrResult(output) +} + +func findTargetHealthDescriptions(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTargetHealthInput) ([]*elbv2.TargetHealthDescription, error) { + output, err := conn.DescribeTargetHealthWithContext(ctx, input) + + if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeInvalidTargetException, elbv2.ErrCodeTargetGroupNotFoundException) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.TargetHealthDescriptions, nil +} From 2f620b8efaea84cc25333f477bf1efca3815433f Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 17:14:16 -0500 Subject: [PATCH 277/438] stack.Peek() and Pop() return Option. 
--- .../experimental/depgraph/dependency_graph.go | 2 +- internal/types/stack/stack.go | 18 ++++++++++-------- internal/types/stack/stack_test.go | 14 +++++++------- 3 files changed, 18 insertions(+), 16 deletions(-) diff --git a/internal/experimental/depgraph/dependency_graph.go b/internal/experimental/depgraph/dependency_graph.go index e599f122059..1b877b17092 100644 --- a/internal/experimental/depgraph/dependency_graph.go +++ b/internal/experimental/depgraph/dependency_graph.go @@ -212,7 +212,7 @@ func depthFirstSearch(edges map[string][]string) func(s string) ([]string, error }) for todo.Len() > 0 { - current := todo.Peek() + current := todo.Peek().MustUnwrap() node := current.node if !current.processed { diff --git a/internal/types/stack/stack.go b/internal/types/stack/stack.go index 72f1fdbb2e5..b74a71b2f0d 100644 --- a/internal/types/stack/stack.go +++ b/internal/types/stack/stack.go @@ -3,6 +3,10 @@ package stack +import ( + "github.com/hashicorp/terraform-provider-aws/internal/types/option" +) + type stack[T any] struct { top *stackNode[T] length int @@ -24,27 +28,25 @@ func (s *stack[T]) Len() int { } // Peek returns the top item on the stack. -func (s *stack[T]) Peek() T { +func (s *stack[T]) Peek() option.Option[T] { if s.length == 0 { - var zero T - return zero + return option.None[T]() } - return s.top.value + return option.Some(s.top.value) } // Pop returns the top item on the stack and removes it from the stack. -func (s *stack[T]) Pop() T { +func (s *stack[T]) Pop() option.Option[T] { if s.length == 0 { - var zero T - return zero + return option.None[T]() } top := s.top s.top = top.prev s.length-- - return top.value + return option.Some(top.value) } // Push puts the specified item on the top of the stack. 
diff --git a/internal/types/stack/stack_test.go b/internal/types/stack/stack_test.go index 6a912e02a63..99a80f906ea 100644 --- a/internal/types/stack/stack_test.go +++ b/internal/types/stack/stack_test.go @@ -16,11 +16,11 @@ func TestStack(t *testing.T) { t.Fatalf("incorrect length. Expected: %d, got: %d", expected, got) } - if got, expected := s.Peek(), 0; got != expected { + if got, expected := s.Peek().IsNone(), true; got != expected { t.Fatalf("incorrect value. Expected: %v, got: %v", expected, got) } - if got, expected := s.Pop(), 0; got != expected { + if got, expected := s.Pop().IsSome(), false; got != expected { t.Fatalf("incorrect value. Expected: %v, got: %v", expected, got) } @@ -30,11 +30,11 @@ func TestStack(t *testing.T) { t.Fatalf("incorrect length. Expected: %d, got: %d", expected, got) } - if got, expected := s.Peek(), 1; got != expected { + if got, expected := s.Peek().MustUnwrap(), 1; got != expected { t.Fatalf("incorrect value. Expected: %v, got: %v", expected, got) } - if got, expected := s.Pop(), 1; got != expected { + if got, expected := s.Pop().MustUnwrap(), 1; got != expected { t.Fatalf("incorrect value. Expected: %v, got: %v", expected, got) } @@ -49,15 +49,15 @@ func TestStack(t *testing.T) { t.Fatalf("incorrect length. Expected: %d, got: %d", expected, got) } - if got, expected := s.Peek(), 3; got != expected { + if got, expected := s.Peek().MustUnwrap(), 3; got != expected { t.Fatalf("incorrect value. Expected: %v, got: %v", expected, got) } - if got, expected := s.Pop(), 3; got != expected { + if got, expected := s.Pop().MustUnwrap(), 3; got != expected { t.Fatalf("incorrect value. Expected: %v, got: %v", expected, got) } - if got, expected := s.Peek(), 2; got != expected { + if got, expected := s.Peek().MustUnwrap(), 2; got != expected { t.Fatalf("incorrect value. 
Expected: %v, got: %v", expected, got) } } From 767fd802893c4d960f87d03d8eeb5f3c49821a26 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 17:21:01 -0500 Subject: [PATCH 278/438] r/aws_lb_target_group_attachment: Tidy up Read. --- .../service/elbv2/target_group_attachment.go | 44 ++++++++++++------- 1 file changed, 27 insertions(+), 17 deletions(-) diff --git a/internal/service/elbv2/target_group_attachment.go b/internal/service/elbv2/target_group_attachment.go index 30be210e8df..d58adbf1a31 100644 --- a/internal/service/elbv2/target_group_attachment.go +++ b/internal/service/elbv2/target_group_attachment.go @@ -17,6 +17,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) @@ -109,7 +110,7 @@ func resourceAttachmentRead(ctx context.Context, d *schema.ResourceData, meta in input.Targets[0].Port = aws.Int64(int64(v.(int))) } - output, err := FindTargetHealthDescription(ctx, conn, input) + _, err := FindTargetHealthDescription(ctx, conn, input) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] ELBv2 Target Group Attachment %s not found, removing from state", d.Id()) @@ -121,19 +122,6 @@ func resourceAttachmentRead(ctx context.Context, d *schema.ResourceData, meta in return sdkdiag.AppendErrorf(diags, "reading ELBv2 Target Group Attachment (%s): %s", d.Id(), err) } - // This will catch targets being removed by hand (draining as we plan) or that have been removed for a while - // without trying to re-create ones that are just not in use. For example, a target can be `unused` if the - // target group isnt assigned to anything, a scenario where we don't want to continuously recreate the resource. 
- if v := output.TargetHealth; v != nil { - if reason := aws.StringValue(v.Reason); reason == elbv2.TargetHealthReasonEnumTargetNotRegistered || reason == elbv2.TargetHealthReasonEnumTargetDeregistrationInProgress { - if !d.IsNewResource() { - log.Printf("[WARN] ELBv2 Target Group Attachment %s not found, removing from state", d.Id()) - d.SetId("") - return diags - } - } - } - return diags } @@ -172,7 +160,21 @@ func resourceAttachmentDelete(ctx context.Context, d *schema.ResourceData, meta } func FindTargetHealthDescription(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTargetHealthInput) (*elbv2.TargetHealthDescription, error) { - output, err := findTargetHealthDescriptions(ctx, conn, input) + output, err := findTargetHealthDescriptions(ctx, conn, input, func(v *elbv2.TargetHealthDescription) bool { + // This will catch targets being removed by hand (draining as we plan) or that have been removed for a while + // without trying to re-create ones that are just not in use. For example, a target can be `unused` if the + // target group isnt assigned to anything, a scenario where we don't want to continuously recreate the resource. 
+ if v := v.TargetHealth; v != nil { + switch reason := aws.StringValue(v.Reason); reason { + case elbv2.TargetHealthReasonEnumTargetDeregistrationInProgress, elbv2.TargetHealthReasonEnumTargetNotRegistered: + return false + default: + return true + } + } + + return false + }) if err != nil { return nil, err @@ -181,7 +183,9 @@ func FindTargetHealthDescription(ctx context.Context, conn *elbv2.ELBV2, input * return tfresource.AssertSinglePtrResult(output) } -func findTargetHealthDescriptions(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTargetHealthInput) ([]*elbv2.TargetHealthDescription, error) { +func findTargetHealthDescriptions(ctx context.Context, conn *elbv2.ELBV2, input *elbv2.DescribeTargetHealthInput, filter tfslices.Predicate[*elbv2.TargetHealthDescription]) ([]*elbv2.TargetHealthDescription, error) { + var targetHealthDescriptions []*elbv2.TargetHealthDescription + output, err := conn.DescribeTargetHealthWithContext(ctx, input) if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeInvalidTargetException, elbv2.ErrCodeTargetGroupNotFoundException) { @@ -199,5 +203,11 @@ func findTargetHealthDescriptions(ctx context.Context, conn *elbv2.ELBV2, input return nil, tfresource.NewEmptyResultError(input) } - return output.TargetHealthDescriptions, nil + for _, v := range output.TargetHealthDescriptions { + if v != nil && filter(v) { + targetHealthDescriptions = append(targetHealthDescriptions, v) + } + } + + return targetHealthDescriptions, nil } From f8592d66ad5e102bd778a8a8c467859da00bf13a Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 17:30:14 -0500 Subject: [PATCH 279/438] Add 'flex.StringValueToInt64'. 
--- internal/flex/flex.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/internal/flex/flex.go b/internal/flex/flex.go index 171c2e120bc..f7e12d1d02b 100644 --- a/internal/flex/flex.go +++ b/internal/flex/flex.go @@ -317,6 +317,13 @@ func StringToIntValue(v *string) int { return i } +// StringValueToInt64 converts a string to a Go int64 pointer value. +// Invalid integer strings are converted to 0. +func StringValueToInt64(v string) *int64 { + i, _ := strconv.Atoi(v) + return aws.Int64(int64(i)) +} + // Takes a string of resource attributes separated by the ResourceIdSeparator constant // returns the number of parts func ResourceIdPartCount(id string) int { From 06c960ca8678827b5e1e779ae2d31dfe6c367886 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 17:37:59 -0500 Subject: [PATCH 280/438] r/aws_lb_target_group_attachment: Tidy up acceptance tests. --- .../elbv2/target_group_attachment_test.go | 225 ++++++------------ 1 file changed, 73 insertions(+), 152 deletions(-) diff --git a/internal/service/elbv2/target_group_attachment_test.go b/internal/service/elbv2/target_group_attachment_test.go index 7412ac9f716..521a6849781 100644 --- a/internal/service/elbv2/target_group_attachment_test.go +++ b/internal/service/elbv2/target_group_attachment_test.go @@ -5,24 +5,25 @@ package elbv2_test import ( "context" - "errors" "fmt" - "strconv" "testing" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/elbv2" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" "github.com/hashicorp/terraform-provider-aws/internal/acctest" "github.com/hashicorp/terraform-provider-aws/internal/conns" + "github.com/hashicorp/terraform-provider-aws/internal/flex" + tfelbv2 "github.com/hashicorp/terraform-provider-aws/internal/service/elbv2" + 
"github.com/hashicorp/terraform-provider-aws/internal/tfresource" ) func TestAccELBV2TargetGroupAttachment_basic(t *testing.T) { ctx := acctest.Context(t) - targetGroupName := fmt.Sprintf("test-target-group-%s", sdkacctest.RandString(10)) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_lb_target_group_attachment.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -31,9 +32,9 @@ func TestAccELBV2TargetGroupAttachment_basic(t *testing.T) { CheckDestroy: testAccCheckTargetGroupAttachmentDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccTargetGroupAttachmentConfig_idInstance(targetGroupName), + Config: testAccTargetGroupAttachmentConfig_idInstance(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckTargetGroupAttachmentExists(ctx, "aws_lb_target_group_attachment.test"), + testAccCheckTargetGroupAttachmentExists(ctx, resourceName), ), }, }, @@ -42,7 +43,9 @@ func TestAccELBV2TargetGroupAttachment_basic(t *testing.T) { func TestAccELBV2TargetGroupAttachment_disappears(t *testing.T) { ctx := acctest.Context(t) - targetGroupName := fmt.Sprintf("test-target-group-%s", sdkacctest.RandString(10)) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_lb_target_group_attachment.test" + resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, ErrorCheck: acctest.ErrorCheck(t, elbv2.EndpointsID), @@ -50,10 +53,10 @@ func TestAccELBV2TargetGroupAttachment_disappears(t *testing.T) { CheckDestroy: testAccCheckTargetGroupAttachmentDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccTargetGroupAttachmentConfig_idInstance(targetGroupName), + Config: testAccTargetGroupAttachmentConfig_idInstance(rName), Check: resource.ComposeTestCheckFunc( - testAccCheckTargetGroupAttachmentExists(ctx, "aws_lb_target_group_attachment.test"), - testAccCheckTargetGroupAttachmentDisappears(ctx, 
"aws_lb_target_group_attachment.test"), + testAccCheckTargetGroupAttachmentExists(ctx, resourceName), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfelbv2.ResourceTargetGroup(), resourceName), ), ExpectNonEmptyPlan: true, }, @@ -63,7 +66,8 @@ func TestAccELBV2TargetGroupAttachment_disappears(t *testing.T) { func TestAccELBV2TargetGroupAttachment_backwardsCompatibility(t *testing.T) { ctx := acctest.Context(t) - targetGroupName := fmt.Sprintf("test-target-group-%s", sdkacctest.RandString(10)) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_alb_target_group_attachment.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -72,9 +76,9 @@ func TestAccELBV2TargetGroupAttachment_backwardsCompatibility(t *testing.T) { CheckDestroy: testAccCheckTargetGroupAttachmentDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccTargetGroupAttachmentConfig_backwardsCompatibility(targetGroupName), + Config: testAccTargetGroupAttachmentConfig_backwardsCompatibility(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckTargetGroupAttachmentExists(ctx, "aws_alb_target_group_attachment.test"), + testAccCheckTargetGroupAttachmentExists(ctx, resourceName), ), }, }, @@ -83,7 +87,8 @@ func TestAccELBV2TargetGroupAttachment_backwardsCompatibility(t *testing.T) { func TestAccELBV2TargetGroupAttachment_port(t *testing.T) { ctx := acctest.Context(t) - targetGroupName := fmt.Sprintf("test-target-group-%s", sdkacctest.RandString(10)) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_lb_target_group_attachment.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -92,9 +97,9 @@ func TestAccELBV2TargetGroupAttachment_port(t *testing.T) { CheckDestroy: testAccCheckTargetGroupAttachmentDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccTargetGroupAttachmentConfig_port(targetGroupName), + Config: 
testAccTargetGroupAttachmentConfig_port(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckTargetGroupAttachmentExists(ctx, "aws_lb_target_group_attachment.test"), + testAccCheckTargetGroupAttachmentExists(ctx, resourceName), ), }, }, @@ -103,7 +108,8 @@ func TestAccELBV2TargetGroupAttachment_port(t *testing.T) { func TestAccELBV2TargetGroupAttachment_ipAddress(t *testing.T) { ctx := acctest.Context(t) - targetGroupName := fmt.Sprintf("test-target-group-%s", sdkacctest.RandString(10)) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_lb_target_group_attachment.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -112,9 +118,9 @@ func TestAccELBV2TargetGroupAttachment_ipAddress(t *testing.T) { CheckDestroy: testAccCheckTargetGroupAttachmentDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccTargetGroupAttachmentConfig_idIPAddress(targetGroupName), + Config: testAccTargetGroupAttachmentConfig_idIPAddress(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckTargetGroupAttachmentExists(ctx, "aws_lb_target_group_attachment.test"), + testAccCheckTargetGroupAttachmentExists(ctx, resourceName), ), }, }, @@ -123,7 +129,8 @@ func TestAccELBV2TargetGroupAttachment_ipAddress(t *testing.T) { func TestAccELBV2TargetGroupAttachment_lambda(t *testing.T) { ctx := acctest.Context(t) - targetGroupName := fmt.Sprintf("test-target-group-%s", sdkacctest.RandString(10)) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_lb_target_group_attachment.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, @@ -132,49 +139,15 @@ func TestAccELBV2TargetGroupAttachment_lambda(t *testing.T) { CheckDestroy: testAccCheckTargetGroupAttachmentDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccTargetGroupAttachmentConfig_idLambda(targetGroupName), + Config: 
testAccTargetGroupAttachmentConfig_idLambda(rName), Check: resource.ComposeAggregateTestCheckFunc( - testAccCheckTargetGroupAttachmentExists(ctx, "aws_lb_target_group_attachment.test"), + testAccCheckTargetGroupAttachmentExists(ctx, resourceName), ), }, }, }) } -func testAccCheckTargetGroupAttachmentDisappears(ctx context.Context, n string) resource.TestCheckFunc { - return func(s *terraform.State) error { - rs, ok := s.RootModule().Resources[n] - if !ok { - return fmt.Errorf("Attachment not found: %s", n) - } - - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Conn(ctx) - targetGroupArn := rs.Primary.Attributes["target_group_arn"] - - target := &elbv2.TargetDescription{ - Id: aws.String(rs.Primary.Attributes["target_id"]), - } - - _, hasPort := rs.Primary.Attributes["port"] - if hasPort { - port, _ := strconv.Atoi(rs.Primary.Attributes["port"]) - target.Port = aws.Int64(int64(port)) - } - - params := &elbv2.DeregisterTargetsInput{ - TargetGroupArn: aws.String(targetGroupArn), - Targets: []*elbv2.TargetDescription{target}, - } - - _, err := conn.DeregisterTargetsWithContext(ctx, params) - if err != nil && !tfawserr.ErrCodeEquals(err, elbv2.ErrCodeTargetGroupNotFoundException) { - return fmt.Errorf("Error deregistering Targets: %s", err) - } - - return err - } -} - func testAccCheckTargetGroupAttachmentExists(ctx context.Context, n string) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] @@ -182,37 +155,26 @@ func testAccCheckTargetGroupAttachmentExists(ctx context.Context, n string) reso return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return errors.New("No Target Group Attachment ID is set") - } - conn := acctest.Provider.Meta().(*conns.AWSClient).ELBV2Conn(ctx) - _, hasPort := rs.Primary.Attributes["port"] - targetGroupArn := rs.Primary.Attributes["target_group_arn"] - - target := &elbv2.TargetDescription{ - Id: aws.String(rs.Primary.Attributes["target_id"]), - } - if hasPort { - 
port, _ := strconv.Atoi(rs.Primary.Attributes["port"]) - target.Port = aws.Int64(int64(port)) + input := &elbv2.DescribeTargetHealthInput{ + TargetGroupArn: aws.String(rs.Primary.Attributes["target_group_arn"]), + Targets: []*elbv2.TargetDescription{{ + Id: aws.String(rs.Primary.Attributes["target_id"]), + }}, } - describe, err := conn.DescribeTargetHealthWithContext(ctx, &elbv2.DescribeTargetHealthInput{ - TargetGroupArn: aws.String(targetGroupArn), - Targets: []*elbv2.TargetDescription{target}, - }) - - if err != nil { - return err + if v := rs.Primary.Attributes["availability_zone"]; v != "" { + input.Targets[0].AvailabilityZone = aws.String(v) } - if len(describe.TargetHealthDescriptions) != 1 { - return errors.New("Target Group Attachment not found") + if v := rs.Primary.Attributes["port"]; v != "" { + input.Targets[0].Port = flex.StringValueToInt64(v) } - return nil + _, err := tfelbv2.FindTargetHealthDescription(ctx, conn, input) + + return err } } @@ -225,95 +187,54 @@ func testAccCheckTargetGroupAttachmentDestroy(ctx context.Context) resource.Test continue } - _, hasPort := rs.Primary.Attributes["port"] - targetGroupArn := rs.Primary.Attributes["target_group_arn"] + input := &elbv2.DescribeTargetHealthInput{ + TargetGroupArn: aws.String(rs.Primary.Attributes["target_group_arn"]), + Targets: []*elbv2.TargetDescription{{ + Id: aws.String(rs.Primary.Attributes["target_id"]), + }}, + } - target := &elbv2.TargetDescription{ - Id: aws.String(rs.Primary.Attributes["target_id"]), + if v := rs.Primary.Attributes["availability_zone"]; v != "" { + input.Targets[0].AvailabilityZone = aws.String(v) } - if hasPort { - port, _ := strconv.Atoi(rs.Primary.Attributes["port"]) - target.Port = aws.Int64(int64(port)) + + if v := rs.Primary.Attributes["port"]; v != "" { + input.Targets[0].Port = flex.StringValueToInt64(v) } - describe, err := conn.DescribeTargetHealthWithContext(ctx, &elbv2.DescribeTargetHealthInput{ - TargetGroupArn: aws.String(targetGroupArn), - Targets: 
[]*elbv2.TargetDescription{target}, - }) - if err == nil { - if len(describe.TargetHealthDescriptions) != 0 { - return fmt.Errorf("Target Group Attachment %q still exists", rs.Primary.ID) - } + _, err := tfelbv2.FindTargetHealthDescription(ctx, conn, input) + + if tfresource.NotFound(err) { + continue } - // Verify the error - if tfawserr.ErrCodeEquals(err, elbv2.ErrCodeTargetGroupNotFoundException) || tfawserr.ErrCodeEquals(err, elbv2.ErrCodeInvalidTargetException) { - return nil - } else { - return fmt.Errorf("Unexpected error checking LB destroyed: %s", err) + if err != nil { + return err } + + return fmt.Errorf("ELBv2 Target Group Attachment %s still exists", rs.Primary.ID) } return nil } } -func testAccTargetGroupAttachmentInstanceBaseConfig() string { - return ` -data "aws_availability_zones" "available" { - # t2.micro instance type is not available in these Availability Zones - exclude_zone_ids = ["usw2-az4"] - state = "available" - - filter { - name = "opt-in-status" - values = ["opt-in-not-required"] - } -} - -data "aws_ami" "amzn-ami-minimal-hvm-ebs" { - most_recent = true - owners = ["amazon"] - - filter { - name = "name" - values = ["amzn-ami-minimal-hvm-*"] - } - - filter { - name = "root-device-type" - values = ["ebs"] - } -} - +func testAccTargetGroupAttachmentCongig_baseEC2Instance(rName string) string { + return acctest.ConfigCompose(acctest.ConfigLatestAmazonLinuxHVMEBSAMI(), acctest.ConfigVPCWithSubnets(rName, 1), fmt.Sprintf(` resource "aws_instance" "test" { ami = data.aws_ami.amzn-ami-minimal-hvm-ebs.id instance_type = "t2.micro" - subnet_id = aws_subnet.test.id -} - -resource "aws_subnet" "test" { - availability_zone = data.aws_availability_zones.available.names[0] - cidr_block = "10.0.1.0/24" - vpc_id = aws_vpc.test.id - - tags = { - Name = "tf-acc-test-lb-target-group-attachment" - } -} - -resource "aws_vpc" "test" { - cidr_block = "10.0.0.0/16" + subnet_id = aws_subnet.test[0].id tags = { - Name = "tf-acc-test-lb-target-group-attachment" + 
Name = %[1]q } } -` +`, rName)) } func testAccTargetGroupAttachmentConfig_idInstance(rName string) string { - return testAccTargetGroupAttachmentInstanceBaseConfig() + fmt.Sprintf(` + return acctest.ConfigCompose(testAccTargetGroupAttachmentCongig_baseEC2Instance(rName), fmt.Sprintf(` resource "aws_lb_target_group" "test" { name = %[1]q port = 443 @@ -325,11 +246,11 @@ resource "aws_lb_target_group_attachment" "test" { target_group_arn = aws_lb_target_group.test.arn target_id = aws_instance.test.id } -`, rName) +`, rName)) } func testAccTargetGroupAttachmentConfig_port(rName string) string { - return testAccTargetGroupAttachmentInstanceBaseConfig() + fmt.Sprintf(` + return acctest.ConfigCompose(testAccTargetGroupAttachmentCongig_baseEC2Instance(rName), fmt.Sprintf(` resource "aws_lb_target_group" "test" { name = %[1]q port = 443 @@ -342,11 +263,11 @@ resource "aws_lb_target_group_attachment" "test" { target_id = aws_instance.test.id port = 80 } -`, rName) +`, rName)) } func testAccTargetGroupAttachmentConfig_backwardsCompatibility(rName string) string { - return testAccTargetGroupAttachmentInstanceBaseConfig() + fmt.Sprintf(` + return acctest.ConfigCompose(testAccTargetGroupAttachmentCongig_baseEC2Instance(rName), fmt.Sprintf(` resource "aws_lb_target_group" "test" { name = %[1]q port = 443 @@ -359,11 +280,11 @@ resource "aws_alb_target_group_attachment" "test" { target_id = aws_instance.test.id port = 80 } -`, rName) +`, rName)) } func testAccTargetGroupAttachmentConfig_idIPAddress(rName string) string { - return testAccTargetGroupAttachmentInstanceBaseConfig() + fmt.Sprintf(` + return acctest.ConfigCompose(testAccTargetGroupAttachmentCongig_baseEC2Instance(rName), fmt.Sprintf(` resource "aws_lb_target_group" "test" { name = %[1]q port = 443 @@ -377,7 +298,7 @@ resource "aws_lb_target_group_attachment" "test" { target_group_arn = aws_lb_target_group.test.arn target_id = aws_instance.test.private_ip } -`, rName) +`, rName)) } func 
testAccTargetGroupAttachmentConfig_idLambda(rName string) string { From f05bbfd6eaa19fd662347211d617abb2897d0aed Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 15 Dec 2023 18:13:46 -0500 Subject: [PATCH 281/438] Fix 'TestAccELBV2TargetGroupAttachment_disappears'. --- internal/service/elbv2/target_group_attachment_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/elbv2/target_group_attachment_test.go b/internal/service/elbv2/target_group_attachment_test.go index 521a6849781..85ad386a05b 100644 --- a/internal/service/elbv2/target_group_attachment_test.go +++ b/internal/service/elbv2/target_group_attachment_test.go @@ -56,7 +56,7 @@ func TestAccELBV2TargetGroupAttachment_disappears(t *testing.T) { Config: testAccTargetGroupAttachmentConfig_idInstance(rName), Check: resource.ComposeTestCheckFunc( testAccCheckTargetGroupAttachmentExists(ctx, resourceName), - acctest.CheckResourceDisappears(ctx, acctest.Provider, tfelbv2.ResourceTargetGroup(), resourceName), + acctest.CheckResourceDisappears(ctx, acctest.Provider, tfelbv2.ResourceTargetGroupAttachment(), resourceName), ), ExpectNonEmptyPlan: true, }, From c4c1eb09809eada6d0d71f288e3c21c4b6d65ee0 Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Fri, 15 Dec 2023 17:21:03 -0600 Subject: [PATCH 282/438] fix test failures --- internal/service/memorydb/cluster_test.go | 74 +++++++++++------------ 1 file changed, 37 insertions(+), 37 deletions(-) diff --git a/internal/service/memorydb/cluster_test.go b/internal/service/memorydb/cluster_test.go index 8dac81c4c76..d8112b172fc 100644 --- a/internal/service/memorydb/cluster_test.go +++ b/internal/service/memorydb/cluster_test.go @@ -21,7 +21,7 @@ import ( func TestAccMemoryDBCluster_basic(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ 
-49,7 +49,7 @@ func TestAccMemoryDBCluster_basic(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "node_type", "db.t4g.small"), resource.TestCheckResourceAttr(resourceName, "num_replicas_per_shard", "1"), resource.TestCheckResourceAttr(resourceName, "num_shards", "2"), - resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.memorydb-redis6"), + resource.TestCheckResourceAttrSet(resourceName, "parameter_group_name"), resource.TestCheckResourceAttr(resourceName, "port", "6379"), resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "1"), resource.TestCheckTypeSetElemAttrPair(resourceName, "security_group_ids.*", "aws_security_group.test", "id"), @@ -83,7 +83,7 @@ func TestAccMemoryDBCluster_basic(t *testing.T) { func TestAccMemoryDBCluster_defaults(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -111,7 +111,7 @@ func TestAccMemoryDBCluster_defaults(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "node_type", "db.t4g.small"), resource.TestCheckResourceAttr(resourceName, "num_replicas_per_shard", "1"), resource.TestCheckResourceAttr(resourceName, "num_shards", "1"), - resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.memorydb-redis6"), + resource.TestCheckResourceAttrSet(resourceName, "parameter_group_name"), resource.TestCheckResourceAttr(resourceName, "port", "6379"), resource.TestCheckResourceAttr(resourceName, "security_group_ids.#", "0"), resource.TestCheckResourceAttr(resourceName, "snapshot_retention_limit", "0"), @@ -133,7 +133,7 @@ func TestAccMemoryDBCluster_defaults(t *testing.T) { func TestAccMemoryDBCluster_disappears(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) 
resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -156,7 +156,7 @@ func TestAccMemoryDBCluster_disappears(t *testing.T) { func TestAccMemoryDBCluster_nameGenerated(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -179,7 +179,7 @@ func TestAccMemoryDBCluster_nameGenerated(t *testing.T) { func TestAccMemoryDBCluster_namePrefix(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -204,7 +204,7 @@ func TestAccMemoryDBCluster_namePrefix(t *testing.T) { func TestAccMemoryDBCluster_create_noTLS(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -231,7 +231,7 @@ func TestAccMemoryDBCluster_create_noTLS(t *testing.T) { func TestAccMemoryDBCluster_create_withDataTiering(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -258,7 +258,7 @@ func TestAccMemoryDBCluster_create_withDataTiering(t *testing.T) { func TestAccMemoryDBCluster_create_withKMS(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -285,7 +285,7 @@ func TestAccMemoryDBCluster_create_withKMS(t *testing.T) { func 
TestAccMemoryDBCluster_create_withPort(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -313,8 +313,8 @@ func TestAccMemoryDBCluster_create_withPort(t *testing.T) { func TestAccMemoryDBCluster_create_fromSnapshot(t *testing.T) { ctx := acctest.Context(t) - rName1 := "tf-test-" + sdkacctest.RandString(8) - rName2 := "tf-test-" + sdkacctest.RandString(8) + rName1 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + rName2 := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(t) }, @@ -335,7 +335,7 @@ func TestAccMemoryDBCluster_create_fromSnapshot(t *testing.T) { func TestAccMemoryDBCluster_delete_withFinalSnapshot(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -382,7 +382,7 @@ func TestAccMemoryDBCluster_delete_withFinalSnapshot(t *testing.T) { func TestAccMemoryDBCluster_Update_aclName(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -421,7 +421,7 @@ func TestAccMemoryDBCluster_Update_aclName(t *testing.T) { func TestAccMemoryDBCluster_Update_description(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -478,7 +478,7 @@ func TestAccMemoryDBCluster_Update_description(t *testing.T) { func 
TestAccMemoryDBCluster_Update_engineVersion(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -491,7 +491,7 @@ func TestAccMemoryDBCluster_Update_engineVersion(t *testing.T) { Config: testAccClusterConfig_engineVersionNull(rName), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "engine_version", "6.2"), + resource.TestCheckResourceAttrSet(resourceName, "engine_version"), ), }, { @@ -500,10 +500,10 @@ func TestAccMemoryDBCluster_Update_engineVersion(t *testing.T) { ImportStateVerify: true, }, { - Config: testAccClusterConfig_engineVersion(rName, "6.2"), + Config: testAccClusterConfig_engineVersion(rName, "7.1"), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "engine_version", "6.2"), + resource.TestCheckResourceAttr(resourceName, "engine_version", "7.1"), ), }, { @@ -517,7 +517,7 @@ func TestAccMemoryDBCluster_Update_engineVersion(t *testing.T) { func TestAccMemoryDBCluster_Update_maintenanceWindow(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -556,7 +556,7 @@ func TestAccMemoryDBCluster_Update_maintenanceWindow(t *testing.T) { func TestAccMemoryDBCluster_Update_nodeType(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -598,7 +598,7 @@ func TestAccMemoryDBCluster_Update_nodeType(t *testing.T) { func 
TestAccMemoryDBCluster_Update_numShards_scaleUp(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -635,7 +635,7 @@ func TestAccMemoryDBCluster_Update_numShards_scaleUp(t *testing.T) { func TestAccMemoryDBCluster_Update_numShards_scaleDown(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -672,7 +672,7 @@ func TestAccMemoryDBCluster_Update_numShards_scaleDown(t *testing.T) { func TestAccMemoryDBCluster_Update_numReplicasPerShard_scaleUp(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -709,7 +709,7 @@ func TestAccMemoryDBCluster_Update_numReplicasPerShard_scaleUp(t *testing.T) { func TestAccMemoryDBCluster_Update_numReplicasPerShard_scaleDown(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -743,7 +743,7 @@ func TestAccMemoryDBCluster_Update_numReplicasPerShard_scaleDown(t *testing.T) { func TestAccMemoryDBCluster_Update_parameterGroup(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -753,10 +753,10 @@ func TestAccMemoryDBCluster_Update_parameterGroup(t *testing.T) { CheckDestroy: 
testAccCheckClusterDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccClusterConfig_parameterGroup(rName, "default.memorydb-redis6"), + Config: testAccClusterConfig_parameterGroup(rName, "default.memorydb-redis7"), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.memorydb-redis6"), + resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.memorydb-redis7"), ), }, { @@ -772,10 +772,10 @@ func TestAccMemoryDBCluster_Update_parameterGroup(t *testing.T) { ), }, { - Config: testAccClusterConfig_parameterGroup(rName, "default.memorydb-redis6"), + Config: testAccClusterConfig_parameterGroup(rName, "default.memorydb-redis7"), Check: resource.ComposeTestCheckFunc( testAccCheckClusterExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.memorydb-redis6"), + resource.TestCheckResourceAttr(resourceName, "parameter_group_name", "default.memorydb-redis7"), ), }, { @@ -789,7 +789,7 @@ func TestAccMemoryDBCluster_Update_parameterGroup(t *testing.T) { func TestAccMemoryDBCluster_Update_securityGroupIds(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -848,7 +848,7 @@ func TestAccMemoryDBCluster_Update_securityGroupIds(t *testing.T) { func TestAccMemoryDBCluster_Update_snapshotRetentionLimit(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -899,7 +899,7 @@ func TestAccMemoryDBCluster_Update_snapshotRetentionLimit(t *testing.T) { func TestAccMemoryDBCluster_Update_snapshotWindow(t *testing.T) { ctx := 
acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -938,7 +938,7 @@ func TestAccMemoryDBCluster_Update_snapshotWindow(t *testing.T) { func TestAccMemoryDBCluster_Update_snsTopicARN(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -989,7 +989,7 @@ func TestAccMemoryDBCluster_Update_snsTopicARN(t *testing.T) { func TestAccMemoryDBCluster_Update_tags(t *testing.T) { ctx := acctest.Context(t) - rName := "tf-test-" + sdkacctest.RandString(8) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) resourceName := "aws_memorydb_cluster.test" resource.ParallelTest(t, resource.TestCase{ @@ -1424,7 +1424,7 @@ func testAccClusterConfig_parameterGroup(rName, parameterGroup string) string { fmt.Sprintf(` resource "aws_memorydb_parameter_group" "test" { name = %[1]q - family = "memorydb_redis6" + family = "memorydb_redis7" parameter { name = "active-defrag-cycle-max" From 5990f9d2d84f0824501e07b2bcf8a6d23e8b8a9a Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Fri, 15 Dec 2023 17:22:07 -0600 Subject: [PATCH 283/438] tweak CHANGELOG entry --- .changelog/31077.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changelog/31077.txt b/.changelog/31077.txt index 85512f2fd72..14fa4b7c17a 100644 --- a/.changelog/31077.txt +++ b/.changelog/31077.txt @@ -1,3 +1,3 @@ ```release-note:bug -resource/aws_memorydb_cluster: Treat "snapshotting" status as pending when creating cluster +resource/aws_memorydb_cluster: Treat `snapshotting` status as pending when creating cluster ``` \ No newline at end of file From 1e47a79292222dd43c17949ade67aa3a34572c44 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 
Dec 2023 08:04:35 -0500 Subject: [PATCH 284/438] json.RemoveEmptyFields: Use a stack. --- internal/json/remove.go | 15 ++--- internal/json/remove_test.go | 107 +++++++++++++++++------------------ 2 files changed, 61 insertions(+), 61 deletions(-) diff --git a/internal/json/remove.go b/internal/json/remove.go index 1191adf2e07..167b9cee26a 100644 --- a/internal/json/remove.go +++ b/internal/json/remove.go @@ -7,6 +7,7 @@ import ( "bytes" "github.com/hashicorp/terraform-provider-aws/internal/json/ujson" + "github.com/hashicorp/terraform-provider-aws/internal/types/stack" ) // RemoveFields removes the specified fields from a valid JSON string. @@ -60,7 +61,7 @@ func RemoveEmptyFields(in string) string { // Returns the new JSON string and the number of empty fields removed. func removeEmptyFields(in string) (string, int) { out := make([]byte, 0, len(in)) - lenBefore := 0 + before := stack.New[int]() removed := 0 err := ujson.Walk([]byte(in), func(_ int, key, value []byte) bool { @@ -72,21 +73,21 @@ func removeEmptyFields(in string) (string, int) { case 'n': // Null (null) skip = true case '[': // Start of array - lenBefore = n + before.Push(n) case ']': // End of array + i := before.Pop().MustUnwrap() if out[n-1] == '[' { // Truncate output. - out = out[:lenBefore] - lenBefore = 0 + out = out[:i] skip = true } case '{': // Start of object - lenBefore = n + before.Push(n) case '}': // End of object + i := before.Pop().MustUnwrap() if n > 1 && out[n-1] == '{' { // Truncate output. 
- out = out[:lenBefore] - lenBefore = 0 + out = out[:i] skip = true } } diff --git a/internal/json/remove_test.go b/internal/json/remove_test.go index 629054ed144..0f6e39e2597 100644 --- a/internal/json/remove_test.go +++ b/internal/json/remove_test.go @@ -52,67 +52,66 @@ func TestRemoveEmptyFields(t *testing.T) { input string want string }{ - // { - // testName: "empty JSON", - // input: "{}", - // want: "{}", - // }, - // { - // testName: "single non-empty simple field", - // input: `{"key": 42}`, - // want: `{"key":42}`, - // }, - // { - // testName: "single non-empty array field", - // input: `{"key": [1, true, "answer"]}`, - // want: `{"key":[1,true,"answer"]}`, - // }, - // { - // testName: "single non-empty object field", - // input: `{"key": {"inner": true}}`, - // want: `{"key":{"inner":true}}`, - // }, - // { - // testName: "single null field", - // input: `{"key": null}`, - // want: `{}`, - // }, - // { - // testName: "single empty array field", - // input: `{"key": []}`, - // want: `{}`, - // }, - // { - // testName: "single empty object field", - // input: `{"key": {}}`, - // want: `{}`, - // }, - // { - // testName: "empty fields deeply nested 1 pass", - // input: `{"key": {"a": [1, 2], "b": [], "c": {"d": true, "e": null}}}`, - // want: `{"key":{"a":[1,2],"c":{"d":true}}}`, - // }, - // { - // testName: "empty fields deeply nested 2 passes", - // input: `{"key": {"a": [1, 2], "b": {}, "c": {"d": null}}}`, - // want: `{"key":{"a":[1,2]}}`, - // }, { - testName: "empty fields deeply nested 2 passes many empty objects", - // input: `{"key": {"a": [1, 2], "b": {}, "c": {"d": {}}, "e": {}, "f": 99}}`, - input: `{"key": {"a": [1, 2], "c": {"d": {}}, "f": 99}}`, - want: `{"key":{"a":[1,2],"f":99}}`, + testName: "empty JSON", + input: "{}", + want: "{}", + }, + { + testName: "single non-empty simple field", + input: `{"key": 42}`, + want: `{"key":42}`, + }, + { + testName: "single non-empty array field", + input: `{"key": [1, true, "answer"]}`, + want: 
`{"key":[1,true,"answer"]}`, + }, + { + testName: "single non-empty object field", + input: `{"key": {"inner": true}}`, + want: `{"key":{"inner":true}}`, + }, + { + testName: "single null field", + input: `{"key": null}`, + want: `{}`, + }, + { + testName: "single empty array field", + input: `{"key": []}`, + want: `{}`, + }, + { + testName: "single empty object field", + input: `{"key": {}}`, + want: `{}`, + }, + { + testName: "empty fields deeply nested 1 pass", + input: `{"key": {"a": [1, 2], "b": [], "c": {"d": true, "e": null}}}`, + want: `{"key":{"a":[1,2],"c":{"d":true}}}`, + }, + { + testName: "empty fields deeply nested 2 passes", + input: `{"key": {"a": [1, 2], "b": {}, "c": {"d": null}}}`, + want: `{"key":{"a":[1,2]}}`, + }, + { + testName: "empty fields deeply nested many empty objects", + input: `{"key": {"a": [1, 2], "b": {}, "c": {"d": {}}, "e": {}, "f": 99}}`, + want: `{"key":{"a":[1,2],"f":99}}`, }, { testName: "empty fields nested empty arrays", input: `{"key": {"a": [1, [2], [], [[]], 3]}}`, want: `{"key":{"a":[1,[2],3]}}`, }, - // { - // testName: "real life example", - // input: 
`{"TargetMetadata":{"SupportLobs":true,"LimitedSizeLobMode":true,"LobMaxSize":32},"FullLoadSettings":{"TargetTablePrepMode":"DROP_AND_CREATE","MaxFullLoadSubTasks":8,"TransactionConsistencyTimeout":600,"CommitRate":10000},"TTSettings":{"TTS3Settings":{},"TTRecordSettings":{}},"Logging":{},"ControlTablesSettings":{"HistoryTimeslotInMinutes":5},"StreamBufferSettings":{"StreamBufferCount":3,"StreamBufferSizeInMB":8},"ChangeProcessingTuning":{"BatchApplyPreserveTransaction":true,"BatchApplyTimeoutMin":1,"BatchApplyTimeoutMax":30,"BatchApplyMemoryLimit":500,"MinTransactionSize":1000,"CommitTimeout":1,"MemoryLimitTotal":1024,"MemoryKeepTime":60,"StatementCacheSize":50},"ChangeProcessingDdlHandlingPolicy":{"HandleSourceTableDropped":true,"HandleSourceTableTruncated":true,"HandleSourceTableAltered":true},"LoopbackPreventionSettings":{},"CharacterSetSettings":{"CharacterSetSupport":{}},"BeforeImageSettings":{},"ErrorBehavior":{"DataErrorPolicy":"LOG_ERROR","DataTruncationErrorPolicy":"LOG_ERROR","DataErrorEscalationPolicy":"SUSPEND_TABLE","TableErrorPolicy":"SUSPEND_TABLE","TableErrorEscalationPolicy":"STOP_TASK","RecoverableErrorCount":-1,"RecoverableErrorInterval":5,"RecoverableErrorThrottling":true,"RecoverableErrorThrottlingMax":1800,"ApplyErrorDeletePolicy":"IGNORE_RECORD","ApplyErrorInsertPolicy":"LOG_ERROR","ApplyErrorUpdatePolicy":"LOG_ERROR","ApplyErrorEscalationPolicy":"LOG_ERROR","FullLoadIgnoreConflicts":true},"ValidationSettings":{"ValidationMode":"ROW_LEVEL","ThreadCount":5,"PartitionSize":10000,"FailureMaxCount":10000,"TableFailureMaxCount":1000}}`, - // want: 
`{"TargetMetadata":{"SupportLobs":true,"LimitedSizeLobMode":true,"LobMaxSize":32},"FullLoadSettings":{"TargetTablePrepMode":"DROP_AND_CREATE","MaxFullLoadSubTasks":8,"TransactionConsistencyTimeout":600,"CommitRate":10000},"ControlTablesSettings":{"HistoryTimeslotInMinutes":5},"StreamBufferSettings":{"StreamBufferCount":3,"StreamBufferSizeInMB":8},"ChangeProcessingTuning":{"BatchApplyPreserveTransaction":true,"BatchApplyTimeoutMin":1,"BatchApplyTimeoutMax":30,"BatchApplyMemoryLimit":500,"MinTransactionSize":1000,"CommitTimeout":1,"MemoryLimitTotal":1024,"MemoryKeepTime":60,"StatementCacheSize":50},"ChangeProcessingDdlHandlingPolicy":{"HandleSourceTableDropped":true,"HandleSourceTableTruncated":true,"HandleSourceTableAltered":true},"ErrorBehavior":{"DataErrorPolicy":"LOG_ERROR","DataTruncationErrorPolicy":"LOG_ERROR","DataErrorEscalationPolicy":"SUSPEND_TABLE","TableErrorPolicy":"SUSPEND_TABLE","TableErrorEscalationPolicy":"STOP_TASK","RecoverableErrorCount":-1,"RecoverableErrorInterval":5,"RecoverableErrorThrottling":true,"RecoverableErrorThrottlingMax":1800,"ApplyErrorDeletePolicy":"IGNORE_RECORD","ApplyErrorInsertPolicy":"LOG_ERROR","ApplyErrorUpdatePolicy":"LOG_ERROR","ApplyErrorEscalationPolicy":"LOG_ERROR","FullLoadIgnoreConflicts":true},"ValidationSettings":{"ValidationMode":"ROW_LEVEL","ThreadCount":5,"PartitionSize":10000,"FailureMaxCount":10000,"TableFailureMaxCount":1000}}`, - // }, + { + testName: "real life example", + input: 
`{"TargetMetadata":{"SupportLobs":true,"LimitedSizeLobMode":true,"LobMaxSize":32},"FullLoadSettings":{"TargetTablePrepMode":"DROP_AND_CREATE","MaxFullLoadSubTasks":8,"TransactionConsistencyTimeout":600,"CommitRate":10000},"TTSettings":{"TTS3Settings":{},"TTRecordSettings":{}},"Logging":{},"ControlTablesSettings":{"HistoryTimeslotInMinutes":5},"StreamBufferSettings":{"StreamBufferCount":3,"StreamBufferSizeInMB":8},"ChangeProcessingTuning":{"BatchApplyPreserveTransaction":true,"BatchApplyTimeoutMin":1,"BatchApplyTimeoutMax":30,"BatchApplyMemoryLimit":500,"MinTransactionSize":1000,"CommitTimeout":1,"MemoryLimitTotal":1024,"MemoryKeepTime":60,"StatementCacheSize":50},"ChangeProcessingDdlHandlingPolicy":{"HandleSourceTableDropped":true,"HandleSourceTableTruncated":true,"HandleSourceTableAltered":true},"LoopbackPreventionSettings":{},"CharacterSetSettings":{"CharacterSetSupport":{}},"BeforeImageSettings":{},"ErrorBehavior":{"DataErrorPolicy":"LOG_ERROR","DataTruncationErrorPolicy":"LOG_ERROR","DataErrorEscalationPolicy":"SUSPEND_TABLE","TableErrorPolicy":"SUSPEND_TABLE","TableErrorEscalationPolicy":"STOP_TASK","RecoverableErrorCount":-1,"RecoverableErrorInterval":5,"RecoverableErrorThrottling":true,"RecoverableErrorThrottlingMax":1800,"ApplyErrorDeletePolicy":"IGNORE_RECORD","ApplyErrorInsertPolicy":"LOG_ERROR","ApplyErrorUpdatePolicy":"LOG_ERROR","ApplyErrorEscalationPolicy":"LOG_ERROR","FullLoadIgnoreConflicts":true},"ValidationSettings":{"ValidationMode":"ROW_LEVEL","ThreadCount":5,"PartitionSize":10000,"FailureMaxCount":10000,"TableFailureMaxCount":1000}}`, + want: 
`{"TargetMetadata":{"SupportLobs":true,"LimitedSizeLobMode":true,"LobMaxSize":32},"FullLoadSettings":{"TargetTablePrepMode":"DROP_AND_CREATE","MaxFullLoadSubTasks":8,"TransactionConsistencyTimeout":600,"CommitRate":10000},"ControlTablesSettings":{"HistoryTimeslotInMinutes":5},"StreamBufferSettings":{"StreamBufferCount":3,"StreamBufferSizeInMB":8},"ChangeProcessingTuning":{"BatchApplyPreserveTransaction":true,"BatchApplyTimeoutMin":1,"BatchApplyTimeoutMax":30,"BatchApplyMemoryLimit":500,"MinTransactionSize":1000,"CommitTimeout":1,"MemoryLimitTotal":1024,"MemoryKeepTime":60,"StatementCacheSize":50},"ChangeProcessingDdlHandlingPolicy":{"HandleSourceTableDropped":true,"HandleSourceTableTruncated":true,"HandleSourceTableAltered":true},"ErrorBehavior":{"DataErrorPolicy":"LOG_ERROR","DataTruncationErrorPolicy":"LOG_ERROR","DataErrorEscalationPolicy":"SUSPEND_TABLE","TableErrorPolicy":"SUSPEND_TABLE","TableErrorEscalationPolicy":"STOP_TASK","RecoverableErrorCount":-1,"RecoverableErrorInterval":5,"RecoverableErrorThrottling":true,"RecoverableErrorThrottlingMax":1800,"ApplyErrorDeletePolicy":"IGNORE_RECORD","ApplyErrorInsertPolicy":"LOG_ERROR","ApplyErrorUpdatePolicy":"LOG_ERROR","ApplyErrorEscalationPolicy":"LOG_ERROR","FullLoadIgnoreConflicts":true},"ValidationSettings":{"ValidationMode":"ROW_LEVEL","ThreadCount":5,"PartitionSize":10000,"FailureMaxCount":10000,"TableFailureMaxCount":1000}}`, + }, } for _, testCase := range testCases { From 270c0a414df0236f0d99aa20034a93eda7533378 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 08:18:01 -0500 Subject: [PATCH 285/438] Use 'v1.52.0' of the semgrep image. 
--- .github/workflows/semgrep-ci.yml | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/semgrep-ci.yml b/.github/workflows/semgrep-ci.yml index c466ddb5b2d..8929554fe4e 100644 --- a/.github/workflows/semgrep-ci.yml +++ b/.github/workflows/semgrep-ci.yml @@ -21,7 +21,7 @@ jobs: name: Code Quality Scan runs-on: ubuntu-latest container: - image: returntocorp/semgrep + image: "returntocorp/semgrep:1.52.0" steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - run: | @@ -43,7 +43,7 @@ jobs: name: Naming Scan Caps/AWS/EC2 runs-on: ubuntu-latest container: - image: returntocorp/semgrep + image: "returntocorp/semgrep:1.52.0" if: (github.action != 'dependabot[bot]') steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -54,7 +54,7 @@ jobs: name: Test Configs Scan runs-on: ubuntu-latest container: - image: returntocorp/semgrep + image: "returntocorp/semgrep:1.52.0" if: (github.action != 'dependabot[bot]') steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -65,7 +65,7 @@ jobs: name: Service Name Scan A-C runs-on: ubuntu-latest container: - image: returntocorp/semgrep + image: "returntocorp/semgrep:1.52.0" if: (github.action != 'dependabot[bot]') steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -76,7 +76,7 @@ jobs: name: Service Name Scan C-I runs-on: ubuntu-latest container: - image: returntocorp/semgrep + image: "returntocorp/semgrep:1.52.0" if: (github.action != 'dependabot[bot]') steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -87,7 +87,7 @@ jobs: name: Service Name Scan I-Q runs-on: ubuntu-latest container: - image: returntocorp/semgrep + image: "returntocorp/semgrep:1.52.0" if: (github.action != 'dependabot[bot]') steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 @@ -98,7 +98,7 @@ jobs: name: Service Name Scan Q-Z runs-on: 
ubuntu-latest container: - image: returntocorp/semgrep + image: "returntocorp/semgrep:1.52.0" if: (github.action != 'dependabot[bot]') steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 From fe2d19228385fa39c3c152e637e3c93e0a6119f1 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Mon, 18 Dec 2023 13:24:54 +0000 Subject: [PATCH 286/438] Update CHANGELOG.md for #34964 --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4c9d48d4ff7..53e46b675d2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,12 @@ ENHANCEMENTS: * data-source/aws_ecr_image: Add `image_uri` attribute ([#24526](https://github.com/hashicorp/terraform-provider-aws/issues/24526)) +BUG FIXES: + +* data-source/aws_lb_target_group: Change `deregistration_delay` from `TypeInt` to `TypeString` ([#31436](https://github.com/hashicorp/terraform-provider-aws/issues/31436)) +* resource/aws_dynamodb_table: Fix error when waiting for snapshot to be created ([#34848](https://github.com/hashicorp/terraform-provider-aws/issues/34848)) +* resource/aws_lb_target_group: Fix diff on `stickiness.cookie_name` when `stickiness.type` is `lb_cookie` ([#31436](https://github.com/hashicorp/terraform-provider-aws/issues/31436)) + ## 5.31.0 (December 15, 2023) FEATURES: From 9c30cb0993d405854bfb40c1af180a1b1c5e95cb Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 08:30:07 -0500 Subject: [PATCH 287/438] json.RemoveEmptyFields: 'string' -> '[]byte'. --- internal/json/remove.go | 10 +++++----- internal/json/remove_test.go | 4 +--- 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/internal/json/remove.go b/internal/json/remove.go index 167b9cee26a..c6e5fa6c4ff 100644 --- a/internal/json/remove.go +++ b/internal/json/remove.go @@ -45,7 +45,7 @@ func RemoveFields(in string, fields ...string) string { } // RemoveEmptyFields removes all empty fields from a valid JSON string. 
-func RemoveEmptyFields(in string) string { +func RemoveEmptyFields(in []byte) []byte { n := 0 for { in, n = removeEmptyFields(in) @@ -59,12 +59,12 @@ func RemoveEmptyFields(in string) string { // removeEmptyFields removes `null`, empty array (`[]`) and empty object (`{}`) fields from a valid JSON string. // Returns the new JSON string and the number of empty fields removed. -func removeEmptyFields(in string) (string, int) { +func removeEmptyFields(in []byte) ([]byte, int) { out := make([]byte, 0, len(in)) before := stack.New[int]() removed := 0 - err := ujson.Walk([]byte(in), func(_ int, key, value []byte) bool { + err := ujson.Walk(in, func(_ int, key, value []byte) bool { n := len(out) // For valid JSON, value will never be empty. @@ -110,8 +110,8 @@ func removeEmptyFields(in string) (string, int) { }) if err != nil { - return "", 0 + return nil, 0 } - return string(out), removed + return out, removed } diff --git a/internal/json/remove_test.go b/internal/json/remove_test.go index 0f6e39e2597..0e4b8445e30 100644 --- a/internal/json/remove_test.go +++ b/internal/json/remove_test.go @@ -119,11 +119,9 @@ func TestRemoveEmptyFields(t *testing.T) { t.Run(testCase.testName, func(t *testing.T) { t.Parallel() - if got, want := RemoveEmptyFields(testCase.input), testCase.want; got != want { + if got, want := RemoveEmptyFields([]byte(testCase.input)), testCase.want; string(got) != want { t.Errorf("RemoveEmptyFields(%q) = %q, want %q", testCase.input, got, want) } }) } } - -// 
{"TargetMetadata":{"SupportLobs":true,"LimitedSizeLobMode":true,"LobMaxSize":32},"FullLoadSettings":{"TargetTablePrepMode":"DROP_AND_CREATE","MaxFullLoadSubTasks":8,"TransactionConsistencyTimeout":600,"CommitRate":10000},"TTSettings":{"TTS3Settings":{},"TTRecordSettings":{}},"Logging":{},"ControlTablesSettings":{"HistoryTimeslotInMinutes":5},"StreamBufferSettings":{"StreamBufferCount":3,"StreamBufferSizeInMB":8},"ChangeProcessingTuning":{"BatchApplyPreserveTransaction":true,"BatchApplyTimeoutMin":1,"BatchApplyTimeoutMax":30,"BatchApplyMemoryLimit":500,"MinTransactionSize":1000,"CommitTimeout":1,"MemoryLimitTotal":1024,"MemoryKeepTime":60,"StatementCacheSize":50},"ChangeProcessingDdlHandlingPolicy":{"HandleSourceTableDropped":true,"HandleSourceTableTruncated":true,"HandleSourceTableAltered":true},"LoopbackPreventionSettings":{},"CharacterSetSettings":{"CharacterSetSupport":{}},"BeforeImageSettings":{},"ErrorBehavior":{"DataErrorPolicy":"LOG_ERROR","DataTruncationErrorPolicy":"LOG_ERROR","DataErrorEscalationPolicy":"SUSPEND_TABLE","TableErrorPolicy":"SUSPEND_TABLE","TableErrorEscalationPolicy":"STOP_TASK","RecoverableErrorCount":-1,"RecoverableErrorInterval":5,"RecoverableErrorThrottling":true,"RecoverableErrorThrottlingMax":1800,"ApplyErrorDeletePolicy":"IGNORE_RECORD","ApplyErrorInsertPolicy":"LOG_ERROR","ApplyErrorUpdatePolicy":"LOG_ERROR","ApplyErrorEscalationPolicy":"LOG_ERROR","FullLoadIgnoreConflicts":true},"ValidationSettings":{"ValidationMode":"ROW_LEVEL","ThreadCount":5,"PartitionSize":10000,"FailureMaxCount":10000,"TableFailureMaxCount":1000}} From 0580b86288af4ff705b17069cac930646ce641d0 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Mon, 18 Dec 2023 10:06:10 -0500 Subject: [PATCH 288/438] r/aws_ssoadmin_application_access_scope(doc): adjust id description --- website/docs/r/ssoadmin_application_access_scope.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/r/ssoadmin_application_access_scope.html.markdown 
b/website/docs/r/ssoadmin_application_access_scope.html.markdown index cf263e39d8f..f00c51a9587 100644 --- a/website/docs/r/ssoadmin_application_access_scope.html.markdown +++ b/website/docs/r/ssoadmin_application_access_scope.html.markdown @@ -44,7 +44,7 @@ The following arguments are optional: This resource exports the following attributes in addition to the arguments above: -* `id` - ARN of the application. +* `id` - A comma-delimited string concatenating `application_arn` and `scope`. ## Import From 87b48416f62936300e20a85f748579417b245153 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 10:07:46 -0500 Subject: [PATCH 289/438] dms: Add 'suppressEquivalentTaskSettings'. --- internal/service/dms/replication_config.go | 6 ++---- internal/service/dms/task_settings_json.go | 22 ++++++++++++++++++++-- 2 files changed, 22 insertions(+), 6 deletions(-) diff --git a/internal/service/dms/replication_config.go b/internal/service/dms/replication_config.go index 6d5862550df..d2468d9b082 100644 --- a/internal/service/dms/replication_config.go +++ b/internal/service/dms/replication_config.go @@ -116,7 +116,7 @@ func ResourceReplicationConfig() *schema.Resource { Optional: true, Computed: true, ValidateFunc: validation.StringIsJSON, - DiffSuppressFunc: verify.SuppressEquivalentJSONDiffs, + DiffSuppressFunc: suppressEquivalentTaskSettings, DiffSuppressOnRefresh: true, }, "replication_type": { @@ -233,9 +233,7 @@ func resourceReplicationConfigRead(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "setting compute_config: %s", err) } d.Set("replication_config_identifier", replicationConfig.ReplicationConfigIdentifier) - v := flattenTaskSettings(aws.StringValue(replicationConfig.ReplicationSettings)) - log.Printf("[INFO] replication_settings=%v", v) - d.Set("replication_settings", v) + d.Set("replication_settings", replicationConfig.ReplicationSettings) d.Set("replication_type", replicationConfig.ReplicationType) 
d.Set("source_endpoint_arn", replicationConfig.SourceEndpointArn) d.Set("supplemental_settings", replicationConfig.SupplementalSettings) diff --git a/internal/service/dms/task_settings_json.go b/internal/service/dms/task_settings_json.go index 67275921354..f3cb248a73e 100644 --- a/internal/service/dms/task_settings_json.go +++ b/internal/service/dms/task_settings_json.go @@ -5,6 +5,10 @@ package dms import ( "encoding/json" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + tfjson "github.com/hashicorp/terraform-provider-aws/internal/json" + "github.com/hashicorp/terraform-provider-aws/internal/verify" ) // https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TaskSettings.html#CHAP_Tasks.CustomizingTasks.TaskSettings.Example @@ -143,7 +147,10 @@ type taskSettings struct { } `json:"ValidationSettings,omitempty"` } -func flattenTaskSettings(apiObject string) string { +// normalizeTaskSettings returns a normalized DMS task settings JSON string. +// Read-only (non-configurable) fields are removed by using the published "schema". +// Empty fields are then removed. +func normalizeTaskSettings(apiObject string) string { var taskSettings taskSettings if err := json.Unmarshal([]byte(apiObject), &taskSettings); err != nil { @@ -153,6 +160,17 @@ func flattenTaskSettings(apiObject string) string { if b, err := json.Marshal(&taskSettings); err != nil { return apiObject } else { - return string(b) + return string(tfjson.RemoveEmptyFields(b)) + } +} + +// suppressEquivalentTaskSettings provides custom difference suppression for task settings. 
+func suppressEquivalentTaskSettings(k, old, new string, d *schema.ResourceData) bool { + if !json.Valid([]byte(old)) || !json.Valid([]byte(new)) { + return old == new } + + old, new = normalizeTaskSettings(old), normalizeTaskSettings(new) + + return verify.JSONStringsEqual(old, new) } From 3931c1d5792941625c1435c940266713461305ee Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Mon, 18 Dec 2023 10:10:27 -0500 Subject: [PATCH 290/438] r/aws_ssoadmin_application_access_scope(test): add prechecks --- .../ssoadmin/application_access_scope_test.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/internal/service/ssoadmin/application_access_scope_test.go b/internal/service/ssoadmin/application_access_scope_test.go index 65a9bd11ad5..dd0c9b00b75 100644 --- a/internal/service/ssoadmin/application_access_scope_test.go +++ b/internal/service/ssoadmin/application_access_scope_test.go @@ -28,7 +28,11 @@ func TestAccSSOAdminApplicationAccessScope_basic(t *testing.T) { applicationResourceName := "aws_ssoadmin_application.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SSOAdminEndpointID) + acctest.PreCheckSSOAdminInstances(ctx, t) + }, ErrorCheck: acctest.ErrorCheck(t, names.SSOAdminEndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckApplicationAccessScopeDestroy(ctx), @@ -57,7 +61,11 @@ func TestAccSSOAdminApplicationAccessScope_disappears(t *testing.T) { resourceName := "aws_ssoadmin_application_access_scope.test" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t) }, + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, names.SSOAdminEndpointID) + acctest.PreCheckSSOAdminInstances(ctx, t) + }, ErrorCheck: acctest.ErrorCheck(t, names.SSOAdminEndpointID), ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckApplicationAccessScopeDestroy(ctx), From 80ff3ae2ba370938dbc167eee9262a381126430d Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 10:12:32 -0500 Subject: [PATCH 291/438] dms: Add 'setLastReplicationError'. --- internal/service/dms/replication_config.go | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/internal/service/dms/replication_config.go b/internal/service/dms/replication_config.go index d2468d9b082..adda23214e9 100644 --- a/internal/service/dms/replication_config.go +++ b/internal/service/dms/replication_config.go @@ -5,6 +5,7 @@ package dms import ( "context" + "errors" "fmt" "log" "time" @@ -19,6 +20,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -462,6 +464,22 @@ func statusReplication(ctx context.Context, conn *dms.DatabaseMigrationService, } } +func setLastReplicationError(err error, replication *dms.Replication) { + var errs []error + + errs = append(errs, tfslices.ApplyToAll(replication.FailureMessages, func(v *string) error { + if v := aws.StringValue(v); v != "" { + return errors.New(v) + } + return nil + })...) 
+ if v := aws.StringValue(replication.StopReason); v != "" { + errs = append(errs, errors.New(v)) + } + + tfresource.SetLastError(err, errors.Join(errs...)) +} + func waitReplicationRunning(ctx context.Context, conn *dms.DatabaseMigrationService, arn string, timeout time.Duration) (*dms.Replication, error) { stateConf := &retry.StateChangeConf{ Pending: []string{ @@ -484,6 +502,7 @@ func waitReplicationRunning(ctx context.Context, conn *dms.DatabaseMigrationServ outputRaw, err := stateConf.WaitForStateContext(ctx) if output, ok := outputRaw.(*dms.Replication); ok { + setLastReplicationError(err, output) return output, err } @@ -503,6 +522,7 @@ func waitReplicationStopped(ctx context.Context, conn *dms.DatabaseMigrationServ outputRaw, err := stateConf.WaitForStateContext(ctx) if output, ok := outputRaw.(*dms.Replication); ok { + setLastReplicationError(err, output) return output, err } @@ -522,6 +542,7 @@ func waitReplicationDeleted(ctx context.Context, conn *dms.DatabaseMigrationServ outputRaw, err := stateConf.WaitForStateContext(ctx) if output, ok := outputRaw.(*dms.Replication); ok { + setLastReplicationError(err, output) return output, err } From 2a313df306e4769b2a68cb666e50c0cf34804053 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 10:15:46 -0500 Subject: [PATCH 292/438] r/aws_dms_replication_task: Use 'dms.MigrationTypeValue_Values()'. 
--- internal/service/dms/replication_task.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/internal/service/dms/replication_task.go b/internal/service/dms/replication_task.go index aeb3ae87f0f..5c36d043f0f 100644 --- a/internal/service/dms/replication_task.go +++ b/internal/service/dms/replication_task.go @@ -53,13 +53,9 @@ func ResourceReplicationTask() *schema.Resource { ConflictsWith: []string{"cdc_start_position"}, }, "migration_type": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice([]string{ - dms.MigrationTypeValueFullLoad, - dms.MigrationTypeValueCdc, - dms.MigrationTypeValueFullLoadAndCdc, - }, false), + Type: schema.TypeString, + Required: true, + ValidateFunc: validation.StringInSlice(dms.MigrationTypeValue_Values(), false), }, "replication_instance_arn": { Type: schema.TypeString, From 7d8b6096c87b8e408c4217fc8664d5167cafc93b Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 10:33:08 -0500 Subject: [PATCH 293/438] Tweak CHANGELOG entry. --- .changelog/34356.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changelog/34356.txt b/.changelog/34356.txt index 1dd0e3aafe1..bf5c6d0a2a4 100644 --- a/.changelog/34356.txt +++ b/.changelog/34356.txt @@ -1,3 +1,3 @@ ```release-note:bug -resource/aws_dms_replication_config: Remove read-only properties from `replication_settings` to suppress diffs +resource/aws_dms_replication_config: Prevent erroneous diffs on `replication_settings` ``` From 25840120d10b3be28dd4afa7b4437da471aa0236 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 10:36:58 -0500 Subject: [PATCH 294/438] Tweak CHANGELOG entry. 
--- .changelog/31917.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changelog/31917.txt b/.changelog/31917.txt index b6f0cbda065..a0a0bc638d7 100644 --- a/.changelog/31917.txt +++ b/.changelog/31917.txt @@ -1,3 +1,3 @@ ```release-note:enhancement -resource/aws_dms_replication_task: allow cdc_start_time parameter to use RFC3339 formatted date additionally to a UNIX timestamp. +resource/aws_dms_replication_task: Allow `cdc_start_time` to use [RFC3339](https://www.rfc-editor.org/rfc/rfc3339) formatted dates in addition to UNIX timestamps ``` \ No newline at end of file From be6996b1ec2449d7c900c2f65dbf94316fc1d912 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 10:38:36 -0500 Subject: [PATCH 295/438] Tweak documentation. --- website/docs/r/dms_replication_task.html.markdown | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/website/docs/r/dms_replication_task.html.markdown b/website/docs/r/dms_replication_task.html.markdown index 62116187d92..84ee9d4c752 100644 --- a/website/docs/r/dms_replication_task.html.markdown +++ b/website/docs/r/dms_replication_task.html.markdown @@ -37,12 +37,8 @@ resource "aws_dms_replication_task" "test" { This resource supports the following arguments: -* `cdc_start_position` - (Optional, Conflicts with `cdc_start_time`) Indicates when you want a change data capture (CDC) - operation to start. The value can be a RFC3339 formatted date, a checkpoint, or a LSN/SCN format depending on the - source engine. For more information, - see [Determining a CDC native start point](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Task.CDC.html#CHAP_Task.CDC.StartPoint.Native). -* `cdc_start_time` - (Optional, Conflicts with `cdc_start_position`) RFC3339 formatted date string or UNIX timestamp for - the start of the Change Data Capture (CDC) operation. 
+* `cdc_start_position` - (Optional, Conflicts with `cdc_start_time`) Indicates when you want a change data capture (CDC) operation to start. The value can be a RFC3339 formatted date, a checkpoint, or a LSN/SCN format depending on the source engine. For more information see [Determining a CDC native start point](https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Task.CDC.html#CHAP_Task.CDC.StartPoint.Native). +* `cdc_start_time` - (Optional, Conflicts with `cdc_start_position`) RFC3339 formatted date string or UNIX timestamp for the start of the Change Data Capture (CDC) operation. * `migration_type` - (Required) The migration type. Can be one of `full-load | cdc | full-load-and-cdc`. * `replication_instance_arn` - (Required) The Amazon Resource Name (ARN) of the replication instance. * `replication_task_id` - (Required) The replication task identifier. From c6c270fbf0728adfaa1084312a10eb5b6e9a4a41 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 10:43:53 -0500 Subject: [PATCH 296/438] Add 'timestamp.IsRFC3339()'. --- internal/types/timestamp/timestamp.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/internal/types/timestamp/timestamp.go b/internal/types/timestamp/timestamp.go index e33b72f2d57..7af319be731 100644 --- a/internal/types/timestamp/timestamp.go +++ b/internal/types/timestamp/timestamp.go @@ -58,3 +58,9 @@ func (t Timestamp) ValidateUTCFormat() error { return nil } + +// See https://www.rfc-editor.org/rfc/rfc3339. 
+func IsRFC3339(s string) bool { + _, err := time.Parse(time.RFC3339, s) + return err == nil +} From 6926b3f1a5013dcf23f1cdde6f6db8b3cfa2a337 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Mon, 18 Dec 2023 10:47:31 -0500 Subject: [PATCH 297/438] r/aws_ssoadmin_application_access_scope: prefer flex id functions, adjust finder --- .../ssoadmin/application_access_scope.go | 68 +++++++++++-------- .../ssoadmin/application_access_scope_test.go | 18 +---- 2 files changed, 40 insertions(+), 46 deletions(-) diff --git a/internal/service/ssoadmin/application_access_scope.go b/internal/service/ssoadmin/application_access_scope.go index 7999ca4134f..e0b2a3f233c 100644 --- a/internal/service/ssoadmin/application_access_scope.go +++ b/internal/service/ssoadmin/application_access_scope.go @@ -6,8 +6,6 @@ package ssoadmin import ( "context" "errors" - "fmt" - "strings" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ssoadmin" @@ -22,6 +20,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" + intflex "github.com/hashicorp/terraform-provider-aws/internal/flex" "github.com/hashicorp/terraform-provider-aws/internal/framework" "github.com/hashicorp/terraform-provider-aws/internal/framework/flex" fwtypes "github.com/hashicorp/terraform-provider-aws/internal/framework/types" @@ -36,6 +35,8 @@ func newResourceApplicationAccessScope(_ context.Context) (resource.ResourceWith const ( ResNameApplicationAccessScope = "Application Access Scope" + + applicationAccessScopeIDPartCount = 2 ) type resourceApplicationAccessScope struct { @@ -108,7 +109,20 @@ func (r *resourceApplicationAccessScope) Create(ctx context.Context, req resourc return } - plan.ID = flex.StringToFramework(ctx, ApplicationAccessScopeCreateResourceID(plan.ApplicationARN.ValueString(), plan.Scope.ValueString())) + idParts := []string{ + 
plan.ApplicationARN.ValueString(), + plan.Scope.ValueString(), + } + id, err := intflex.FlattenResourceId(idParts, applicationAccessScopeIDPartCount, false) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SSOAdmin, create.ErrActionCreating, ResNameApplicationAccessScope, plan.ApplicationARN.String(), err), + err.Error(), + ) + return + } + + plan.ID = types.StringValue(id) resp.Diagnostics.Append(resp.State.Set(ctx, plan)...) } @@ -122,9 +136,7 @@ func (r *resourceApplicationAccessScope) Read(ctx context.Context, req resource. return } - applicationARN, scope, _ := ApplicationAccessScopeParseResourceID(state.ID.ValueString()) - - out, err := findApplicationAccessScopeByID(ctx, conn, applicationARN, scope) + out, err := findApplicationAccessScopeByID(ctx, conn, state.ID.ValueString()) if tfresource.NotFound(err) { resp.State.RemoveResource(ctx) return @@ -137,7 +149,18 @@ func (r *resourceApplicationAccessScope) Read(ctx context.Context, req resource. return } - state.ApplicationARN = flex.StringToFrameworkARN(ctx, aws.String(applicationARN)) + // ApplicationARN is not returned in the finder output. To allow import to set + // all attributes correctly, parse the ID for this value instead. 
+ parts, err := intflex.ExpandResourceId(state.ID.ValueString(), applicationAccessScopeIDPartCount, false) + if err != nil { + resp.Diagnostics.AddError( + create.ProblemStandardMessage(names.SSOAdmin, create.ErrActionSetting, ResNameApplicationAccessScope, state.ID.String(), err), + err.Error(), + ) + return + } + + state.ApplicationARN = fwtypes.ARNValue(parts[0]) state.AuthorizedTargets = flex.FlattenFrameworkStringValueList(ctx, out.AuthorizedTargets) state.Scope = flex.StringToFramework(ctx, out.Scope) @@ -157,10 +180,9 @@ func (r *resourceApplicationAccessScope) Delete(ctx context.Context, req resourc return } - applicationARN, scope, _ := ApplicationAccessScopeParseResourceID(state.ID.ValueString()) in := &ssoadmin.DeleteApplicationAccessScopeInput{ - ApplicationArn: aws.String(applicationARN), - Scope: aws.String(scope), + ApplicationArn: aws.String(state.ApplicationARN.ValueString()), + Scope: aws.String(state.Scope.ValueString()), } _, err := conn.DeleteApplicationAccessScope(ctx, in) @@ -180,29 +202,15 @@ func (r *resourceApplicationAccessScope) ImportState(ctx context.Context, req re resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) } -const applicationAccessScopeIDSeparator = "," - -func ApplicationAccessScopeCreateResourceID(applicationARN, scope string) *string { - parts := []string{applicationARN, scope} - id := strings.Join(parts, applicationAccessScopeIDSeparator) - - return &id -} - -func ApplicationAccessScopeParseResourceID(id string) (string, string, error) { - parts := strings.Split(id, applicationAccessScopeIDSeparator) - - if len(parts) == 2 && parts[0] != "" && parts[1] != "" { - return parts[0], parts[1], nil +func findApplicationAccessScopeByID(ctx context.Context, conn *ssoadmin.Client, id string) (*ssoadmin.GetApplicationAccessScopeOutput, error) { + parts, err := intflex.ExpandResourceId(id, applicationAccessScopeIDPartCount, false) + if err != nil { + return nil, err } - return "", "", fmt.Errorf("unexpected format 
for ID (%[1]s), expected APPLICATION_ARN%[2]sSCOPE", id, applicationAccessScopeIDSeparator) -} - -func findApplicationAccessScopeByID(ctx context.Context, conn *ssoadmin.Client, applicationARN, scope string) (*ssoadmin.GetApplicationAccessScopeOutput, error) { in := &ssoadmin.GetApplicationAccessScopeInput{ - ApplicationArn: aws.String(applicationARN), - Scope: aws.String(scope), + ApplicationArn: aws.String(parts[0]), + Scope: aws.String(parts[1]), } out, err := conn.GetApplicationAccessScope(ctx, in) diff --git a/internal/service/ssoadmin/application_access_scope_test.go b/internal/service/ssoadmin/application_access_scope_test.go index dd0c9b00b75..1caf9bed685 100644 --- a/internal/service/ssoadmin/application_access_scope_test.go +++ b/internal/service/ssoadmin/application_access_scope_test.go @@ -48,7 +48,6 @@ func TestAccSSOAdminApplicationAccessScope_basic(t *testing.T) { { ResourceName: resourceName, ImportState: true, - ImportStateIdFunc: testAccApplicationAccessScopeImportStateIdFunc(resourceName), ImportStateVerify: true, }, }, @@ -91,8 +90,7 @@ func testAccCheckApplicationAccessScopeDestroy(ctx context.Context) resource.Tes continue } - applicationARN, scope, _ := tfssoadmin.ApplicationAccessScopeParseResourceID(rs.Primary.ID) - _, err := tfssoadmin.FindApplicationAccessScopeByID(ctx, conn, applicationARN, scope) + _, err := tfssoadmin.FindApplicationAccessScopeByID(ctx, conn, rs.Primary.ID) if errs.IsA[*types.ResourceNotFoundException](err) { return nil } @@ -120,8 +118,7 @@ func testAccCheckApplicationAccessScopeExists(ctx context.Context, name string) conn := acctest.Provider.Meta().(*conns.AWSClient).SSOAdminClient(ctx) - applicationARN, scope, _ := tfssoadmin.ApplicationAccessScopeParseResourceID(rs.Primary.ID) - _, err := tfssoadmin.FindApplicationAccessScopeByID(ctx, conn, applicationARN, scope) + _, err := tfssoadmin.FindApplicationAccessScopeByID(ctx, conn, rs.Primary.ID) if err != nil { return create.Error(names.SSOAdmin, 
create.ErrActionCheckingExistence, tfssoadmin.ResNameApplicationAccessScope, rs.Primary.ID, err) } @@ -130,17 +127,6 @@ func testAccCheckApplicationAccessScopeExists(ctx context.Context, name string) } } -func testAccApplicationAccessScopeImportStateIdFunc(resourceName string) resource.ImportStateIdFunc { - return func(s *terraform.State) (string, error) { - rs, ok := s.RootModule().Resources[resourceName] - if !ok { - return "", fmt.Errorf("Not Found: %s", resourceName) - } - - return fmt.Sprintf("%s,%s", rs.Primary.Attributes["application_arn"], rs.Primary.Attributes["scope"]), nil - } -} - func testAccApplicationAccessScopeConfig_basic(rName, scope string) string { return fmt.Sprintf(` data "aws_ssoadmin_instances" "test" {} From 2151d9224436526fd2c94f71cc0c43202be8825f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Dec 2023 10:50:32 -0500 Subject: [PATCH 298/438] build(deps): bump github.com/aws/aws-sdk-go in /.ci/providerlint (#34960) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.49.2 to 1.49.4. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.49.2...v1.49.4) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .ci/providerlint/go.mod | 2 +- .ci/providerlint/go.sum | 4 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 355 ++++++++++++++++++ .ci/providerlint/vendor/modules.txt | 2 +- 4 files changed, 359 insertions(+), 4 deletions(-) diff --git a/.ci/providerlint/go.mod b/.ci/providerlint/go.mod index 8d6db90f68b..5e7746396f9 100644 --- a/.ci/providerlint/go.mod +++ b/.ci/providerlint/go.mod @@ -3,7 +3,7 @@ module github.com/hashicorp/terraform-provider-aws/ci/providerlint go 1.20 require ( - github.com/aws/aws-sdk-go v1.49.2 + github.com/aws/aws-sdk-go v1.49.4 github.com/bflad/tfproviderlint v0.29.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0 golang.org/x/tools v0.13.0 diff --git a/.ci/providerlint/go.sum b/.ci/providerlint/go.sum index 8755e6de2fb..8a4d4cf6f34 100644 --- a/.ci/providerlint/go.sum +++ b/.ci/providerlint/go.sum @@ -8,8 +8,8 @@ github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= -github.com/aws/aws-sdk-go v1.49.2 h1:+4BEcm1nPCoDbVd+gg8cdxpa1qJfrvnddy12vpEVWjw= -github.com/aws/aws-sdk-go v1.49.2/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.49.4 h1:qiXsqEeLLhdLgUIyfr5ot+N/dGPWALmtM1SetRmbUlY= +github.com/aws/aws-sdk-go v1.49.4/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/bflad/gopaniccheck v0.1.0 h1:tJftp+bv42ouERmUMWLoUn/5bi/iQZjHPznM00cP/bU= github.com/bflad/gopaniccheck v0.1.0/go.mod h1:ZCj2vSr7EqVeDaqVsWN4n2MwdROx1YL+LFo47TSWtsA= github.com/bflad/tfproviderlint v0.29.0 h1:zxKYAAM6IZ4ace1a3LX+uzMRIMP8L+iOtEc+FP2Yoow= diff --git 
a/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 41a2711656a..e519246f43e 100644 --- a/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -6621,6 +6621,9 @@ var awsPartition = partition{ }, "cognito-identity": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -6639,6 +6642,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -6745,6 +6751,9 @@ var awsPartition = partition{ }, "cognito-idp": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{}, endpointKey{ Region: "ap-northeast-1", }: endpoint{}, @@ -6763,6 +6772,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -10454,6 +10466,161 @@ var awsPartition = partition{ }, }, }, + "eks-auth": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "af-south-1", + }: endpoint{ + Hostname: "eks-auth.af-south-1.api.aws", + }, + endpointKey{ + Region: "ap-east-1", + }: endpoint{ + Hostname: "eks-auth.ap-east-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-1", + }: endpoint{ + Hostname: "eks-auth.ap-northeast-1.api.aws", + }, + endpointKey{ + Region: "ap-northeast-2", + }: endpoint{ + Hostname: "eks-auth.ap-northeast-2.api.aws", + }, 
+ endpointKey{ + Region: "ap-northeast-3", + }: endpoint{ + Hostname: "eks-auth.ap-northeast-3.api.aws", + }, + endpointKey{ + Region: "ap-south-1", + }: endpoint{ + Hostname: "eks-auth.ap-south-1.api.aws", + }, + endpointKey{ + Region: "ap-south-2", + }: endpoint{ + Hostname: "eks-auth.ap-south-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-1", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-1.api.aws", + }, + endpointKey{ + Region: "ap-southeast-2", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-2.api.aws", + }, + endpointKey{ + Region: "ap-southeast-3", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-3.api.aws", + }, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{ + Hostname: "eks-auth.ap-southeast-4.api.aws", + }, + endpointKey{ + Region: "ca-central-1", + }: endpoint{ + Hostname: "eks-auth.ca-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-1", + }: endpoint{ + Hostname: "eks-auth.eu-central-1.api.aws", + }, + endpointKey{ + Region: "eu-central-2", + }: endpoint{ + Hostname: "eks-auth.eu-central-2.api.aws", + }, + endpointKey{ + Region: "eu-north-1", + }: endpoint{ + Hostname: "eks-auth.eu-north-1.api.aws", + }, + endpointKey{ + Region: "eu-south-1", + }: endpoint{ + Hostname: "eks-auth.eu-south-1.api.aws", + }, + endpointKey{ + Region: "eu-south-2", + }: endpoint{ + Hostname: "eks-auth.eu-south-2.api.aws", + }, + endpointKey{ + Region: "eu-west-1", + }: endpoint{ + Hostname: "eks-auth.eu-west-1.api.aws", + }, + endpointKey{ + Region: "eu-west-2", + }: endpoint{ + Hostname: "eks-auth.eu-west-2.api.aws", + }, + endpointKey{ + Region: "eu-west-3", + }: endpoint{ + Hostname: "eks-auth.eu-west-3.api.aws", + }, + endpointKey{ + Region: "il-central-1", + }: endpoint{ + Hostname: "eks-auth.il-central-1.api.aws", + }, + endpointKey{ + Region: "me-central-1", + }: endpoint{ + Hostname: "eks-auth.me-central-1.api.aws", + }, + endpointKey{ + Region: "me-south-1", + }: endpoint{ + Hostname: "eks-auth.me-south-1.api.aws", + }, + 
endpointKey{ + Region: "sa-east-1", + }: endpoint{ + Hostname: "eks-auth.sa-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-1", + }: endpoint{ + Hostname: "eks-auth.us-east-1.api.aws", + }, + endpointKey{ + Region: "us-east-2", + }: endpoint{ + Hostname: "eks-auth.us-east-2.api.aws", + }, + endpointKey{ + Region: "us-west-1", + }: endpoint{ + Hostname: "eks-auth.us-west-1.api.aws", + }, + endpointKey{ + Region: "us-west-2", + }: endpoint{ + Hostname: "eks-auth.us-west-2.api.aws", + }, + }, + }, "elasticache": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -20504,6 +20671,9 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -24440,6 +24610,42 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-us-east-1", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-east-2", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-1", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-west-2", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -24449,15 +24655,39 @@ var awsPartition = partition{ endpointKey{ Region: "us-east-1", }: endpoint{}, + endpointKey{ + Region: "us-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: 
"rolesanywhere-fips.us-east-1.amazonaws.com", + }, endpointKey{ Region: "us-east-2", }: endpoint{}, + endpointKey{ + Region: "us-east-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-east-2.amazonaws.com", + }, endpointKey{ Region: "us-west-1", }: endpoint{}, + endpointKey{ + Region: "us-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-west-1.amazonaws.com", + }, endpointKey{ Region: "us-west-2", }: endpoint{}, + endpointKey{ + Region: "us-west-2", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-west-2.amazonaws.com", + }, }, }, "route53": service{ @@ -34119,6 +34349,31 @@ var awscnPartition = partition{ }: endpoint{}, }, }, + "eks-auth": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.amazonwebservices.com.cn", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.amazonwebservices.com.cn", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "cn-north-1", + }: endpoint{ + Hostname: "eks-auth.cn-north-1.api.amazonwebservices.com.cn", + }, + endpointKey{ + Region: "cn-northwest-1", + }: endpoint{ + Hostname: "eks-auth.cn-northwest-1.api.amazonwebservices.com.cn", + }, + }, + }, "elasticache": service{ Endpoints: serviceEndpoints{ endpointKey{ @@ -37680,6 +37935,31 @@ var awsusgovPartition = partition{ }, }, }, + "eks-auth": service{ + Defaults: endpointDefaults{ + defaultKey{}: endpoint{ + DNSSuffix: "api.aws", + }, + defaultKey{ + Variant: fipsVariant, + }: endpoint{ + Hostname: "{service}-fips.{region}.{dnsSuffix}", + DNSSuffix: "api.aws", + }, + }, + Endpoints: serviceEndpoints{ + endpointKey{ + Region: "us-gov-east-1", + }: endpoint{ + Hostname: "eks-auth.us-gov-east-1.api.aws", + }, + endpointKey{ + Region: "us-gov-west-1", + }: endpoint{ + Hostname: "eks-auth.us-gov-west-1.api.aws", + }, + }, + }, "elasticache": service{ Defaults: endpointDefaults{ 
defaultKey{}: endpoint{}, @@ -40377,12 +40657,42 @@ var awsusgovPartition = partition{ }, "rolesanywhere": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-gov-east-1", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-gov-west-1", + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-gov-east-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-east-1.amazonaws.com", + }, endpointKey{ Region: "us-gov-west-1", }: endpoint{}, + endpointKey{ + Region: "us-gov-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rolesanywhere-fips.us-gov-west-1.amazonaws.com", + }, }, }, "route53": service{ @@ -42807,12 +43117,42 @@ var awsisoPartition = partition{ }, "ram": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-east-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-iso-east-1.c2s.ic.gov", + }, endpointKey{ Region: "us-iso-west-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-iso-west-1.c2s.ic.gov", + }, }, }, "rbin": service{ @@ -43677,9 +44017,24 @@ var awsisobPartition = partition{ 
}, "ram": service{ Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-isob-east-1", }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.us-isob-east-1.sc2s.sgov.gov", + }, }, }, "rbin": service{ diff --git a/.ci/providerlint/vendor/modules.txt b/.ci/providerlint/vendor/modules.txt index 3b328cf28be..adf9ee493d6 100644 --- a/.ci/providerlint/vendor/modules.txt +++ b/.ci/providerlint/vendor/modules.txt @@ -24,7 +24,7 @@ github.com/agext/levenshtein # github.com/apparentlymart/go-textseg/v15 v15.0.0 ## explicit; go 1.16 github.com/apparentlymart/go-textseg/v15/textseg -# github.com/aws/aws-sdk-go v1.49.2 +# github.com/aws/aws-sdk-go v1.49.4 ## explicit; go 1.19 github.com/aws/aws-sdk-go/aws/awserr github.com/aws/aws-sdk-go/aws/endpoints From d566e40c8594ea2e59b99abbabc3d842cbe04634 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Dec 2023 10:50:51 -0500 Subject: [PATCH 299/438] build(deps): bump the aws-sdk-go group with 5 updates (#34959) Bumps the aws-sdk-go group with 5 updates: | Package | From | To | | --- | --- | --- | | [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) | `1.49.2` | `1.49.4` | | [github.com/aws/aws-sdk-go-v2/service/controltower](https://github.com/aws/aws-sdk-go-v2) | `1.10.5` | `1.10.6` | | [github.com/aws/aws-sdk-go-v2/service/rds](https://github.com/aws/aws-sdk-go-v2) | `1.64.5` | `1.64.6` | | [github.com/aws/aws-sdk-go-v2/service/rolesanywhere](https://github.com/aws/aws-sdk-go-v2) | `1.6.5` | `1.6.6` | | [github.com/aws/aws-sdk-go-v2/service/workspaces](https://github.com/aws/aws-sdk-go-v2) | `1.35.5` | `1.35.6` | Updates `github.com/aws/aws-sdk-go` from 1.49.2 to 1.49.4 - [Release 
notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.49.2...v1.49.4) Updates `github.com/aws/aws-sdk-go-v2/service/controltower` from 1.10.5 to 1.10.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/m2/v1.10.5...service/rum/v1.10.6) Updates `github.com/aws/aws-sdk-go-v2/service/rds` from 1.64.5 to 1.64.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/rds/v1.64.5...service/rds/v1.64.6) Updates `github.com/aws/aws-sdk-go-v2/service/rolesanywhere` from 1.6.5 to 1.6.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/credentials/v1.6.5...service/rum/v1.6.6) Updates `github.com/aws/aws-sdk-go-v2/service/workspaces` from 1.35.5 to 1.35.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/ecs/v1.35.5...service/ssm/v1.35.6) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/controltower dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/rds dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/rolesanywhere dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/workspaces dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 10 +++++----- go.sum | 20 ++++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/go.mod b/go.mod index 78a783a30a1..9f200c85948 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.20 require ( github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c github.com/YakDriver/regexache v0.23.0 - github.com/aws/aws-sdk-go v1.49.2 + github.com/aws/aws-sdk-go v1.49.4 github.com/aws/aws-sdk-go-v2 v1.24.0 github.com/aws/aws-sdk-go-v2/config v1.26.1 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 @@ -33,7 +33,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/comprehend v1.29.5 github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.31.5 github.com/aws/aws-sdk-go-v2/service/connectcases v1.12.5 - github.com/aws/aws-sdk-go-v2/service/controltower v1.10.5 + github.com/aws/aws-sdk-go-v2/service/controltower v1.10.6 github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.34.5 github.com/aws/aws-sdk-go-v2/service/directoryservice v1.22.5 github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.6.5 @@ -70,12 +70,12 @@ require ( github.com/aws/aws-sdk-go-v2/service/pricing v1.24.5 github.com/aws/aws-sdk-go-v2/service/qldb v1.19.5 github.com/aws/aws-sdk-go-v2/service/rbin v1.14.3 - github.com/aws/aws-sdk-go-v2/service/rds v1.64.5 + github.com/aws/aws-sdk-go-v2/service/rds v1.64.6 github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.23.5 github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.8.5 github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.19.5 github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.19.5 - github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.6.5 + github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.6.6 github.com/aws/aws-sdk-go-v2/service/route53domains v1.20.5 github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5 github.com/aws/aws-sdk-go-v2/service/s3control v1.41.5 @@ -97,7 
+97,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/transcribe v1.34.5 github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.8.3 github.com/aws/aws-sdk-go-v2/service/vpclattice v1.5.5 - github.com/aws/aws-sdk-go-v2/service/workspaces v1.35.5 + github.com/aws/aws-sdk-go-v2/service/workspaces v1.35.6 github.com/aws/aws-sdk-go-v2/service/xray v1.23.5 github.com/beevik/etree v1.2.0 github.com/davecgh/go-spew v1.1.1 diff --git a/go.sum b/go.sum index dd14e373ab9..52445e50244 100644 --- a/go.sum +++ b/go.sum @@ -21,8 +21,8 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmms github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go v1.49.2 h1:+4BEcm1nPCoDbVd+gg8cdxpa1qJfrvnddy12vpEVWjw= -github.com/aws/aws-sdk-go v1.49.2/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.49.4 h1:qiXsqEeLLhdLgUIyfr5ot+N/dGPWALmtM1SetRmbUlY= +github.com/aws/aws-sdk-go v1.49.4/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v1.24.0 h1:890+mqQ+hTpNuw0gGP6/4akolQkSToDJgHfQE7AwGuk= github.com/aws/aws-sdk-go-v2 v1.24.0/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs= @@ -89,8 +89,8 @@ github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.31.5 h1:64f/3D7gFxW/wAO github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.31.5/go.mod h1:BlYY4hg0e2n3xrU/En0syXSD5KhHeDNna/aETUe0I1I= github.com/aws/aws-sdk-go-v2/service/connectcases v1.12.5 h1:66XGF7vdSc6XpG7xOg2zt1fW1FzY1LB2BQardkGxK0M= github.com/aws/aws-sdk-go-v2/service/connectcases v1.12.5/go.mod h1:R9o2YFsOY6PTlfFPacDGKL5cgesr3+ZTXA5i3PhOai4= 
-github.com/aws/aws-sdk-go-v2/service/controltower v1.10.5 h1:wUKgXgstAjFFSP+70DtAqnk5QXOoCzP9lzf6xqtgSJo= -github.com/aws/aws-sdk-go-v2/service/controltower v1.10.5/go.mod h1:HIRn9vSg38bhAI8BlxIWXl/i8qPruJzon9kPOeD31Ng= +github.com/aws/aws-sdk-go-v2/service/controltower v1.10.6 h1:Sb6qOCo2oD9iGJ+0gyCK/bQDNqfk9vH9rTwXsCvG8Ik= +github.com/aws/aws-sdk-go-v2/service/controltower v1.10.6/go.mod h1:HIRn9vSg38bhAI8BlxIWXl/i8qPruJzon9kPOeD31Ng= github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.34.5 h1:a//AdeswzibpC4fkkB1X4Ql/4iWZKGyYV0lWNTRDp1w= github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.34.5/go.mod h1:Dst4mNfdyggL9PHmkYdSiVgJvwhfboruXtzQZpy46Xs= github.com/aws/aws-sdk-go-v2/service/directoryservice v1.22.5 h1:i/7aXIrjTdVZtch90MSQ3EC03dh5XgTmJtbAqFtzysk= @@ -177,8 +177,8 @@ github.com/aws/aws-sdk-go-v2/service/qldb v1.19.5 h1:dzxL7EqY37jp4AGBbMXyZT+koN8 github.com/aws/aws-sdk-go-v2/service/qldb v1.19.5/go.mod h1:tN5rVxOznGnV6y5gXixoL83vMOAuPTFAnqafo813M8A= github.com/aws/aws-sdk-go-v2/service/rbin v1.14.3 h1:5rT2pGAFgU2c/nkAZM2iDVVkLceQ04XFgkeWxKM04/4= github.com/aws/aws-sdk-go-v2/service/rbin v1.14.3/go.mod h1:yX/8MJOGKdhrLvzOHppNzJvBQh5OKocDq4sP3CtXxgE= -github.com/aws/aws-sdk-go-v2/service/rds v1.64.5 h1:HzkVXbafwf/N+uwNzuXaOpXwG2z8mi7nYFRKHeH/hFQ= -github.com/aws/aws-sdk-go-v2/service/rds v1.64.5/go.mod h1:MYzRMSdY70kcS8AFg0aHmk/xj6VAe0UfaCCoLrBWPow= +github.com/aws/aws-sdk-go-v2/service/rds v1.64.6 h1:5aUu86tGOprdKtoIClCYPC6i4xalRDztBOlXgJnQFHk= +github.com/aws/aws-sdk-go-v2/service/rds v1.64.6/go.mod h1:MYzRMSdY70kcS8AFg0aHmk/xj6VAe0UfaCCoLrBWPow= github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.23.5 h1:jGGtFvVJ7RwXtAYOxLoUzWw5WjvsO1NYWuMawL64gZU= github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.23.5/go.mod h1:nJQaSBV7r9td6WMmDDGKtlwE8D9BIDEDIpANfN+gMPE= github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.8.5 h1:7+BV1yNEchDbrgg/hdPVAi3jomqkoI5lqcQcTWTunGA= @@ -187,8 +187,8 @@ github.com/aws/aws-sdk-go-v2/service/resourcegroups 
v1.19.5 h1:WDwFoNiIKvLkQJPSY github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.19.5/go.mod h1:kHgibL7mHteV68QqxEWk/+GfSioAUZGBlz4e3Vs2r60= github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.19.5 h1:vINTeQlqUbYkyKichayWejWqsMNya35Mj7XBcUZnwVI= github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.19.5/go.mod h1:Nngchp1Q7LNBS8J10r4P0npfroNRaCVz6wWNfBz7j4E= -github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.6.5 h1:rpTfpdg8k0f9bOKfu/eHAj81Ic+qhpiD1HJkDVP/m6Q= -github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.6.5/go.mod h1:e2+mEoq1rHtFpX8p6WcgiFgnDz0zG6y1BY/g8us9g2I= +github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.6.6 h1:K//BccrDBRMSQCa4UkVVYCp2y4z77arQiT2TYl88wY0= +github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.6.6/go.mod h1:e2+mEoq1rHtFpX8p6WcgiFgnDz0zG6y1BY/g8us9g2I= github.com/aws/aws-sdk-go-v2/service/route53domains v1.20.5 h1:WDr8iQXuDzL6ERqRvpdIy1ZdOjg6lXlEHSo8wOJiOyI= github.com/aws/aws-sdk-go-v2/service/route53domains v1.20.5/go.mod h1:7fnaaVoKfZaWJ8RuNYTYV3SkqD6BkFYlRuFDEkHajpc= github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5 h1:Keso8lIOS+IzI2MkPZyK6G0LYcK3My2LQ+T5bxghEAY= @@ -235,8 +235,8 @@ github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.8.3 h1:1L+/ZK8nGuc1H github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.8.3/go.mod h1:QJoz7ojCJ/cT0q9sV+K9ZZBETBVoSpJXyRzvEt4BuSg= github.com/aws/aws-sdk-go-v2/service/vpclattice v1.5.5 h1:8AV6s1CjF1Kg4wI4Cru0vFRiQALPe3T/THLkPGCbQo0= github.com/aws/aws-sdk-go-v2/service/vpclattice v1.5.5/go.mod h1:Avxrq4VqhpuKgGdZifhrJP5a9DsDt7cESkdhaZHnYp0= -github.com/aws/aws-sdk-go-v2/service/workspaces v1.35.5 h1:f0Ci0wO9AnBjpeeZjahBD41gib79vdaPcipk29MPKXs= -github.com/aws/aws-sdk-go-v2/service/workspaces v1.35.5/go.mod h1:vkYsJdF9sZl/o1eoK8tSSjzAT+R87QjswOGSTZfyO0Y= +github.com/aws/aws-sdk-go-v2/service/workspaces v1.35.6 h1:RrpjQ5xJN/AW0PCO7EGhhVsKq7BeNqkx5+h6p3QOeTU= +github.com/aws/aws-sdk-go-v2/service/workspaces v1.35.6/go.mod 
h1:vkYsJdF9sZl/o1eoK8tSSjzAT+R87QjswOGSTZfyO0Y= github.com/aws/aws-sdk-go-v2/service/xray v1.23.5 h1:uCqKSGx5Esj9ZW6/zZ7tslkM65aH+qjHO3yboiRqcLo= github.com/aws/aws-sdk-go-v2/service/xray v1.23.5/go.mod h1:VmWKTNu6V1qRG+skNKkYt7VOFohYdtOp7B2OSvpBZac= github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= From 60da92a8ddbed32c9f102635be41411bdaf46bbf Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 10:52:02 -0500 Subject: [PATCH 300/438] Add 'flex.StringValueToInt64Value'. --- internal/flex/flex.go | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/internal/flex/flex.go b/internal/flex/flex.go index f7e12d1d02b..b5ff60a7cb0 100644 --- a/internal/flex/flex.go +++ b/internal/flex/flex.go @@ -317,11 +317,17 @@ func StringToIntValue(v *string) int { return i } -// StringValueToInt64 converts a string to a Go int64 pointer value. +// StringValueToInt64 converts a string to a Go int64 pointer. // Invalid integer strings are converted to 0. func StringValueToInt64(v string) *int64 { - i, _ := strconv.Atoi(v) - return aws.Int64(int64(i)) + return aws.Int64(StringValueToInt64Value(v)) +} + +// StringValueToInt64Value converts a string to a Go int64 value. +// Invalid integer strings are converted to 0. +func StringValueToInt64Value(v string) int64 { + i, _ := strconv.ParseInt(v, 0, 64) + return i } // Takes a string of resource attributes separated by the ResourceIdSeparator constant From 2873e18d9c3375847c14fd58218ca39bdcf27b08 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 10:55:19 -0500 Subject: [PATCH 301/438] r/aws_dms_replication_task: Correct update of 'cdc_start_time'. 
--- internal/service/dms/replication_task.go | 31 +++++++++--------------- 1 file changed, 12 insertions(+), 19 deletions(-) diff --git a/internal/service/dms/replication_task.go b/internal/service/dms/replication_task.go index 29d6e57a83a..f0ba638f710 100644 --- a/internal/service/dms/replication_task.go +++ b/internal/service/dms/replication_task.go @@ -8,7 +8,6 @@ import ( "encoding/json" "fmt" "log" - "strconv" "time" "github.com/aws/aws-sdk-go/aws" @@ -20,6 +19,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" + "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" @@ -135,18 +135,11 @@ func resourceReplicationTaskCreate(ctx context.Context, d *schema.ResourceData, } if v, ok := d.GetOk("cdc_start_time"); ok { - // Check if input is RFC3339 date string or UNIX timestamp. - dateTime, err := time.Parse(time.RFC3339, v.(string)) - - if err != nil { - // Not a valid RF3339 date, checking if this is a UNIX timestamp. - seconds, err := strconv.ParseInt(v.(string), 10, 64) - if err != nil { - return sdkdiag.AppendErrorf(diags, "DMS create replication task. 
Invalid Unix timestamp given for cdc_start_time parameter: %s", err) - } - request.CdcStartTime = aws.Time(time.Unix(seconds, 0)) + v := v.(string) + if t, err := time.Parse(time.RFC3339, v); err != nil { + request.CdcStartTime = aws.Time(time.Unix(flex.StringValueToInt64Value(v), 0)) } else { - request.CdcStartTime = aws.Time(dateTime) + request.CdcStartTime = aws.Time(t) } } @@ -232,14 +225,14 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, } if d.HasChange("cdc_start_time") { - // Parse the RFC3339 date string into a time.Time object - dateTime, err := time.Parse(time.RFC3339, d.Get("cdc_start_time").(string)) - - if err != nil { - return sdkdiag.AppendErrorf(diags, "DMS update replication task. Invalid cdc_start_time value: %s", err) + if v, ok := d.GetOk("cdc_start_time"); ok { + v := v.(string) + if t, err := time.Parse(time.RFC3339, v); err != nil { + input.CdcStartTime = aws.Time(time.Unix(flex.StringValueToInt64Value(v), 0)) + } else { + input.CdcStartTime = aws.Time(t) + } } - - input.CdcStartTime = aws.Time(dateTime) } if d.HasChange("replication_task_settings") { From c45951e1f2a1e7389255da83c0328d44770b2c9f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Dec 2023 10:56:35 -0500 Subject: [PATCH 302/438] build(deps): bump dawidd6/action-download-artifact from 2.28.0 to 3.0.0 (#34920) Bumps [dawidd6/action-download-artifact](https://github.com/dawidd6/action-download-artifact) from 2.28.0 to 3.0.0. - [Release notes](https://github.com/dawidd6/action-download-artifact/releases) - [Commits](https://github.com/dawidd6/action-download-artifact/compare/268677152d06ba59fcec7a7f0b5d961b6ccd7e1e...e7466d1a7587ed14867642c2ca74b5bcc1e19a2d) --- updated-dependencies: - dependency-name: dawidd6/action-download-artifact dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/post_publish.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/post_publish.yml b/.github/workflows/post_publish.yml index 65c36399a03..f692ea70743 100644 --- a/.github/workflows/post_publish.yml +++ b/.github/workflows/post_publish.yml @@ -19,7 +19,7 @@ jobs: steps: - if: github.event_name == 'workflow_run' name: Download Artifact from Release workflow - uses: dawidd6/action-download-artifact@268677152d06ba59fcec7a7f0b5d961b6ccd7e1e # v2.28.0 + uses: dawidd6/action-download-artifact@e7466d1a7587ed14867642c2ca74b5bcc1e19a2d # v3.0.0 with: workflow: release.yml name: release-tag From 1191fdea94e5c935e4481f62f2ee6d90000dac13 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Dec 2023 10:58:20 -0500 Subject: [PATCH 303/438] build(deps): bump github.com/hashicorp/terraform-plugin-sdk/v2 (#34942) Bumps [github.com/hashicorp/terraform-plugin-sdk/v2](https://github.com/hashicorp/terraform-plugin-sdk) from 2.30.0 to 2.31.0. - [Release notes](https://github.com/hashicorp/terraform-plugin-sdk/releases) - [Changelog](https://github.com/hashicorp/terraform-plugin-sdk/blob/main/CHANGELOG.md) - [Commits](https://github.com/hashicorp/terraform-plugin-sdk/compare/v2.30.0...v2.31.0) --- updated-dependencies: - dependency-name: github.com/hashicorp/terraform-plugin-sdk/v2 dependency-type: direct:production update-type: version-update:semver-minor ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .ci/providerlint/go.mod | 30 +- .ci/providerlint/go.sum | 73 +- .../hashicorp/go-plugin/CHANGELOG.md | 23 + .../hashicorp/go-plugin/buf.gen.yaml | 14 + .../github.com/hashicorp/go-plugin/buf.yaml | 7 + .../github.com/hashicorp/go-plugin/client.go | 137 +- .../hashicorp/go-plugin/constants.go | 9 +- .../hashicorp/go-plugin/grpc_broker.go | 207 +- .../hashicorp/go-plugin/grpc_client.go | 7 +- .../hashicorp/go-plugin/grpc_server.go | 5 +- .../grpcmux/blocked_client_listener.go | 51 + .../grpcmux/blocked_server_listener.go | 49 + .../internal/grpcmux/grpc_client_muxer.go | 105 + .../go-plugin/internal/grpcmux/grpc_muxer.go | 41 + .../internal/grpcmux/grpc_server_muxer.go | 190 ++ .../go-plugin/internal/plugin/gen.go | 6 - .../internal/plugin/grpc_broker.pb.go | 347 ++- .../internal/plugin/grpc_broker.proto | 8 +- .../internal/plugin/grpc_broker_grpc.pb.go | 142 + .../internal/plugin/grpc_controller.pb.go | 224 +- .../internal/plugin/grpc_controller.proto | 2 +- .../plugin/grpc_controller_grpc.pb.go | 110 + .../internal/plugin/grpc_stdio.pb.go | 356 ++- .../internal/plugin/grpc_stdio.proto | 2 +- .../internal/plugin/grpc_stdio_grpc.pb.go | 148 + .../github.com/hashicorp/go-plugin/server.go | 56 +- .../github.com/hashicorp/go-plugin/testing.go | 56 +- .../hashicorp/hc-install/.go-version | 2 +- .../hashicorp/hc-install/version/VERSION | 2 +- .../hashicorp/terraform-json/README.md | 34 +- .../hashicorp/terraform-json/plan.go | 4 + .../hashicorp/terraform-json/state.go | 2 +- .../internal/logging/keys.go | 3 + .../tfprotov5/diagnostic.go | 4 + .../terraform-plugin-go/tfprotov5/function.go | 141 + .../tfprotov5/internal/diag/diagnostics.go | 4 + .../tfprotov5/internal/fromproto/function.go | 36 + .../internal/tfplugin5/tfplugin5.pb.go | 2472 +++++++++++------ .../internal/tfplugin5/tfplugin5.proto | 114 +- .../internal/tfplugin5/tfplugin5_grpc.pb.go | 84 +- 
.../tfprotov5/internal/toproto/diagnostic.go | 7 +- .../tfprotov5/internal/toproto/function.go | 174 ++ .../tfprotov5/internal/toproto/provider.go | 28 +- .../terraform-plugin-go/tfprotov5/provider.go | 21 + .../tfprotov5/tf5server/server.go | 120 + .../tfprotov6/diagnostic.go | 4 + .../terraform-plugin-go/tfprotov6/function.go | 141 + .../tfprotov6/internal/diag/diagnostics.go | 4 + .../tfprotov6/internal/fromproto/function.go | 36 + .../internal/tfplugin6/tfplugin6.pb.go | 2302 ++++++++++----- .../internal/tfplugin6/tfplugin6.proto | 114 +- .../internal/tfplugin6/tfplugin6_grpc.pb.go | 84 +- .../tfprotov6/internal/toproto/diagnostic.go | 7 +- .../tfprotov6/internal/toproto/function.go | 174 ++ .../tfprotov6/internal/toproto/provider.go | 28 +- .../terraform-plugin-go/tfprotov6/provider.go | 21 + .../tfprotov6/tf6server/server.go | 120 + .../v2/helper/schema/grpc_provider.go | 32 + .../terraform-plugin-sdk/v2/meta/meta.go | 2 +- .../vendor/github.com/hashicorp/yamux/addr.go | 2 +- .../github.com/hashicorp/yamux/const.go | 27 +- .../vendor/github.com/hashicorp/yamux/mux.go | 16 + .../github.com/hashicorp/yamux/session.go | 139 +- .../github.com/hashicorp/yamux/stream.go | 90 +- .../vmihailenco/msgpack/v5/CHANGELOG.md | 26 +- .../vmihailenco/msgpack/v5/README.md | 30 +- .../vmihailenco/msgpack/v5/decode.go | 81 +- .../vmihailenco/msgpack/v5/decode_map.go | 39 +- .../vmihailenco/msgpack/v5/decode_query.go | 3 +- .../vmihailenco/msgpack/v5/decode_slice.go | 23 +- .../vmihailenco/msgpack/v5/decode_typgen.go | 46 + .../vmihailenco/msgpack/v5/decode_value.go | 7 +- .../vmihailenco/msgpack/v5/encode.go | 21 +- .../vmihailenco/msgpack/v5/encode_map.go | 48 +- .../vmihailenco/msgpack/v5/encode_value.go | 9 + .../github.com/vmihailenco/msgpack/v5/ext.go | 8 +- .../vmihailenco/msgpack/v5/intern.go | 18 +- .../vmihailenco/msgpack/v5/msgpack.go | 2 +- .../vmihailenco/msgpack/v5/package.json | 2 +- .../github.com/vmihailenco/msgpack/v5/time.go | 8 +- 
.../vmihailenco/msgpack/v5/types.go | 24 +- .../vmihailenco/msgpack/v5/version.go | 2 +- .../vendor/golang.org/x/mod/modfile/rule.go | 12 +- .../golang.org/x/net/context/context.go | 56 - .../vendor/golang.org/x/net/context/go17.go | 73 - .../vendor/golang.org/x/net/context/go19.go | 21 - .../golang.org/x/net/context/pre_go17.go | 301 -- .../golang.org/x/net/context/pre_go19.go | 110 - .../golang.org/x/net/http2/databuffer.go | 59 +- .../vendor/golang.org/x/net/http2/go111.go | 30 - .../vendor/golang.org/x/net/http2/go115.go | 27 - .../vendor/golang.org/x/net/http2/go118.go | 17 - .../golang.org/x/net/http2/not_go111.go | 21 - .../golang.org/x/net/http2/not_go115.go | 31 - .../golang.org/x/net/http2/not_go118.go | 17 - .../vendor/golang.org/x/net/http2/server.go | 24 +- .../golang.org/x/net/http2/transport.go | 33 +- .../vendor/golang.org/x/net/idna/go118.go | 1 - .../golang.org/x/net/idna/idna10.0.0.go | 1 - .../vendor/golang.org/x/net/idna/idna9.0.0.go | 1 - .../vendor/golang.org/x/net/idna/pre_go118.go | 1 - .../golang.org/x/net/idna/tables10.0.0.go | 1 - .../golang.org/x/net/idna/tables11.0.0.go | 1 - .../golang.org/x/net/idna/tables12.0.0.go | 1 - .../golang.org/x/net/idna/tables13.0.0.go | 1 - .../golang.org/x/net/idna/tables15.0.0.go | 1 - .../golang.org/x/net/idna/tables9.0.0.go | 1 - .../golang.org/x/net/idna/trie12.0.0.go | 1 - .../golang.org/x/net/idna/trie13.0.0.go | 1 - .../vendor/golang.org/x/sys/unix/fcntl.go | 2 +- .../golang.org/x/sys/unix/ioctl_linux.go | 5 + .../vendor/golang.org/x/sys/unix/mkerrors.sh | 3 +- .../golang.org/x/sys/unix/syscall_bsd.go | 2 +- .../golang.org/x/sys/unix/syscall_linux.go | 28 +- .../golang.org/x/sys/unix/syscall_openbsd.go | 14 + .../golang.org/x/sys/unix/syscall_solaris.go | 2 +- .../x/sys/unix/syscall_zos_s390x.go | 2 +- .../golang.org/x/sys/unix/zerrors_linux.go | 2 +- .../golang.org/x/sys/unix/zsyscall_linux.go | 15 + .../x/sys/unix/zsyscall_openbsd_386.go | 26 + .../x/sys/unix/zsyscall_openbsd_386.s | 5 + 
.../x/sys/unix/zsyscall_openbsd_amd64.go | 26 + .../x/sys/unix/zsyscall_openbsd_amd64.s | 5 + .../x/sys/unix/zsyscall_openbsd_arm.go | 26 + .../x/sys/unix/zsyscall_openbsd_arm.s | 5 + .../x/sys/unix/zsyscall_openbsd_arm64.go | 26 + .../x/sys/unix/zsyscall_openbsd_arm64.s | 5 + .../x/sys/unix/zsyscall_openbsd_mips64.go | 26 + .../x/sys/unix/zsyscall_openbsd_mips64.s | 5 + .../x/sys/unix/zsyscall_openbsd_ppc64.go | 26 + .../x/sys/unix/zsyscall_openbsd_ppc64.s | 6 + .../x/sys/unix/zsyscall_openbsd_riscv64.go | 26 + .../x/sys/unix/zsyscall_openbsd_riscv64.s | 5 + .../golang.org/x/sys/unix/ztypes_linux.go | 32 + .../google.golang.org/appengine/.travis.yml | 18 - .../appengine/CONTRIBUTING.md | 6 +- .../google.golang.org/appengine/README.md | 6 +- .../google.golang.org/appengine/appengine.go | 23 +- .../appengine/appengine_vm.go | 12 +- .../appengine/datastore/datastore.go | 2 +- .../appengine/datastore/doc.go | 21 +- .../appengine/datastore/key.go | 2 +- .../appengine/datastore/keycompat.go | 3 +- .../appengine/datastore/metadata.go | 17 +- .../appengine/datastore/query.go | 6 +- .../appengine/datastore/transaction.go | 3 +- .../google.golang.org/appengine/identity.go | 3 +- .../appengine/internal/api.go | 347 ++- .../appengine/internal/api_classic.go | 29 +- .../appengine/internal/api_common.go | 50 +- .../appengine/internal/identity.go | 7 +- .../appengine/internal/identity_classic.go | 23 +- .../appengine/internal/identity_flex.go | 1 + .../appengine/internal/identity_vm.go | 20 +- .../appengine/internal/main.go | 1 + .../appengine/internal/main_vm.go | 3 +- .../appengine/internal/transaction.go | 10 +- .../google.golang.org/appengine/namespace.go | 3 +- .../google.golang.org/appengine/timeout.go | 2 +- .../appengine/travis_install.sh | 18 - .../appengine/travis_test.sh | 12 - .../vendor/google.golang.org/grpc/README.md | 60 +- .../grpc/attributes/attributes.go | 35 +- .../grpc/balancer/balancer.go | 62 +- .../grpc/balancer/base/balancer.go | 22 +- 
...r_conn_wrappers.go => balancer_wrapper.go} | 337 +-- .../grpc_binarylog_v1/binarylog.pb.go | 2 +- .../vendor/google.golang.org/grpc/call.go | 11 +- .../google.golang.org/grpc/clientconn.go | 593 ++-- .../vendor/google.golang.org/grpc/codec.go | 8 +- .../google.golang.org/grpc/codes/codes.go | 8 +- .../google.golang.org/grpc/credentials/tls.go | 75 +- .../google.golang.org/grpc/dialoptions.go | 65 +- .../grpc/encoding/encoding.go | 17 +- .../grpc/encoding/proto/proto.go | 4 +- .../grpc/grpclog/component.go | 40 +- .../google.golang.org/grpc/grpclog/grpclog.go | 30 +- .../google.golang.org/grpc/grpclog/logger.go | 30 +- .../grpc/grpclog/loggerv2.go | 56 +- .../google.golang.org/grpc/health/client.go | 2 +- .../grpc/health/grpc_health_v1/health.pb.go | 2 +- .../health/grpc_health_v1/health_grpc.pb.go | 22 +- .../vendor/google.golang.org/grpc/idle.go | 287 -- .../google.golang.org/grpc/interceptor.go | 12 +- .../grpc/internal/backoff/backoff.go | 36 + .../balancer/gracefulswitch/gracefulswitch.go | 59 +- .../grpc/internal/balancerload/load.go | 4 +- .../grpc/internal/binarylog/method_logger.go | 4 +- .../grpc/internal/buffer/unbounded.go | 57 +- .../grpc/internal/channelz/funcs.go | 76 +- .../grpc/internal/channelz/logging.go | 12 +- .../grpc/internal/channelz/types.go | 5 + .../grpc/internal/channelz/util_linux.go | 2 +- .../grpc/internal/channelz/util_nonlinux.go | 2 +- .../grpc/internal/credentials/credentials.go | 8 +- .../grpc/internal/envconfig/envconfig.go | 8 +- .../grpc/internal/envconfig/xds.go | 39 - .../grpc/internal/experimental.go | 28 + .../grpc/internal/grpclog/grpclog.go | 40 +- .../grpc/internal/grpclog/prefixLogger.go | 8 +- .../internal/grpcsync/callback_serializer.go | 75 +- .../grpc/internal/grpcsync/pubsub.go | 43 +- .../grpc/internal/idle/idle.go | 278 ++ .../grpc/internal/internal.go | 58 +- .../grpc/internal/metadata/metadata.go | 2 +- .../grpc/internal/pretty/pretty.go | 2 +- .../grpc/internal/resolver/config_selector.go | 4 +- 
.../internal/resolver/dns/dns_resolver.go | 69 +- .../resolver/dns/internal/internal.go | 70 + .../grpc/internal/status/status.go | 36 +- .../grpc/internal/tcp_keepalive_nonunix.go | 29 + .../grpc/internal/tcp_keepalive_unix.go | 54 + .../grpc/internal/transport/controlbuf.go | 16 +- .../grpc/internal/transport/handler_server.go | 76 +- .../grpc/internal/transport/http2_client.go | 68 +- .../grpc/internal/transport/http2_server.go | 121 +- .../grpc/internal/transport/http_util.go | 77 +- .../grpc/internal/transport/proxy.go | 14 +- .../grpc/internal/transport/transport.go | 39 +- .../grpc/metadata/metadata.go | 18 +- .../google.golang.org/grpc/peer/peer.go | 2 + .../google.golang.org/grpc/picker_wrapper.go | 53 +- .../google.golang.org/grpc/pickfirst.go | 76 +- .../google.golang.org/grpc/preloader.go | 2 +- .../grpc_reflection_v1/reflection.pb.go | 2 +- .../grpc_reflection_v1alpha/reflection.pb.go | 2 +- .../grpc/resolver/dns/dns_resolver.go | 36 + .../google.golang.org/grpc/resolver/map.go | 123 +- .../grpc/resolver/resolver.go | 86 +- .../grpc/resolver_conn_wrapper.go | 239 -- .../grpc/resolver_wrapper.go | 197 ++ .../vendor/google.golang.org/grpc/rpc_util.go | 17 +- .../vendor/google.golang.org/grpc/server.go | 411 +-- .../grpc/shared_buffer_pool.go | 4 +- .../google.golang.org/grpc/stats/stats.go | 14 +- .../google.golang.org/grpc/status/status.go | 14 +- .../vendor/google.golang.org/grpc/stream.go | 126 +- .../vendor/google.golang.org/grpc/tap/tap.go | 6 + .../vendor/google.golang.org/grpc/trace.go | 6 +- .../vendor/google.golang.org/grpc/version.go | 2 +- .../vendor/google.golang.org/grpc/vet.sh | 174 +- .ci/providerlint/vendor/modules.txt | 43 +- 242 files changed, 10426 insertions(+), 5740 deletions(-) create mode 100644 .ci/providerlint/vendor/github.com/hashicorp/go-plugin/buf.gen.yaml create mode 100644 .ci/providerlint/vendor/github.com/hashicorp/go-plugin/buf.yaml create mode 100644 
.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_client_listener.go create mode 100644 .ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_server_listener.go create mode 100644 .ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_client_muxer.go create mode 100644 .ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_muxer.go create mode 100644 .ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_server_muxer.go delete mode 100644 .ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go create mode 100644 .ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker_grpc.pb.go create mode 100644 .ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller_grpc.pb.go create mode 100644 .ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio_grpc.pb.go create mode 100644 .ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/function.go create mode 100644 .ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/function.go create mode 100644 .ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/function.go create mode 100644 .ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/function.go create mode 100644 .ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/function.go create mode 100644 .ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/function.go create mode 100644 .ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode_typgen.go delete mode 100644 .ci/providerlint/vendor/golang.org/x/net/context/context.go delete mode 100644 .ci/providerlint/vendor/golang.org/x/net/context/go17.go delete mode 100644 
.ci/providerlint/vendor/golang.org/x/net/context/go19.go delete mode 100644 .ci/providerlint/vendor/golang.org/x/net/context/pre_go17.go delete mode 100644 .ci/providerlint/vendor/golang.org/x/net/context/pre_go19.go delete mode 100644 .ci/providerlint/vendor/golang.org/x/net/http2/go111.go delete mode 100644 .ci/providerlint/vendor/golang.org/x/net/http2/go115.go delete mode 100644 .ci/providerlint/vendor/golang.org/x/net/http2/go118.go delete mode 100644 .ci/providerlint/vendor/golang.org/x/net/http2/not_go111.go delete mode 100644 .ci/providerlint/vendor/golang.org/x/net/http2/not_go115.go delete mode 100644 .ci/providerlint/vendor/golang.org/x/net/http2/not_go118.go delete mode 100644 .ci/providerlint/vendor/google.golang.org/appengine/.travis.yml delete mode 100644 .ci/providerlint/vendor/google.golang.org/appengine/travis_install.sh delete mode 100644 .ci/providerlint/vendor/google.golang.org/appengine/travis_test.sh rename .ci/providerlint/vendor/google.golang.org/grpc/{balancer_conn_wrappers.go => balancer_wrapper.go} (52%) delete mode 100644 .ci/providerlint/vendor/google.golang.org/grpc/idle.go create mode 100644 .ci/providerlint/vendor/google.golang.org/grpc/internal/experimental.go create mode 100644 .ci/providerlint/vendor/google.golang.org/grpc/internal/idle/idle.go create mode 100644 .ci/providerlint/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go create mode 100644 .ci/providerlint/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go create mode 100644 .ci/providerlint/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go create mode 100644 .ci/providerlint/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go delete mode 100644 .ci/providerlint/vendor/google.golang.org/grpc/resolver_conn_wrapper.go create mode 100644 .ci/providerlint/vendor/google.golang.org/grpc/resolver_wrapper.go diff --git a/.ci/providerlint/go.mod b/.ci/providerlint/go.mod index 5e7746396f9..ab9b94f28d8 100644 --- 
a/.ci/providerlint/go.mod +++ b/.ci/providerlint/go.mod @@ -5,7 +5,7 @@ go 1.20 require ( github.com/aws/aws-sdk-go v1.49.4 github.com/bflad/tfproviderlint v0.29.0 - github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0 golang.org/x/tools v0.13.0 ) @@ -24,19 +24,19 @@ require ( github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect github.com/hashicorp/go-hclog v1.5.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-plugin v1.5.1 // indirect + github.com/hashicorp/go-plugin v1.6.0 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.6.0 // indirect - github.com/hashicorp/hc-install v0.6.1 // indirect + github.com/hashicorp/hc-install v0.6.2 // indirect github.com/hashicorp/hcl/v2 v2.19.1 // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.19.0 // indirect - github.com/hashicorp/terraform-json v0.17.1 // indirect - github.com/hashicorp/terraform-plugin-go v0.19.0 // indirect + github.com/hashicorp/terraform-json v0.18.0 // indirect + github.com/hashicorp/terraform-plugin-go v0.20.0 // indirect github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect - github.com/hashicorp/terraform-registry-address v0.2.2 // indirect + github.com/hashicorp/terraform-registry-address v0.2.3 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect - github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect + github.com/hashicorp/yamux v0.1.1 // indirect github.com/mattn/go-colorable v0.1.12 // indirect github.com/mattn/go-isatty v0.0.14 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect @@ -46,16 +46,16 @@ require ( github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/oklog/run v1.0.0 // indirect github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect - github.com/vmihailenco/msgpack/v5 v5.3.5 // indirect + 
github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/zclconf/go-cty v1.14.1 // indirect - golang.org/x/crypto v0.15.0 // indirect - golang.org/x/mod v0.13.0 // indirect - golang.org/x/net v0.17.0 // indirect - golang.org/x/sys v0.14.0 // indirect + golang.org/x/crypto v0.16.0 // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/net v0.18.0 // indirect + golang.org/x/sys v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 // indirect - google.golang.org/grpc v1.57.1 // indirect + google.golang.org/appengine v1.6.8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect + google.golang.org/grpc v1.60.0 // indirect google.golang.org/protobuf v1.31.0 // indirect ) diff --git a/.ci/providerlint/go.sum b/.ci/providerlint/go.sum index 8a4d4cf6f34..1b1c2f84fcc 100644 --- a/.ci/providerlint/go.sum +++ b/.ci/providerlint/go.sum @@ -2,7 +2,6 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= -github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= @@ -27,12 +26,12 @@ github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod 
h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= -github.com/go-git/go-git/v5 v5.9.0 h1:cD9SFA7sHVRdJ7AYck1ZaAa/yeuBvGPxwXDL8cxrObY= +github.com/go-git/go-git/v5 v5.10.1 h1:tu8/D8i+TWxgKpzQ3Vc43e+kkhXqtsZCKI/egajKnxk= github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -52,35 +51,35 @@ github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+ github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.5.1 h1:oGm7cWBaYIp3lJpx1RUEfLWophprE2EV/KUeqBYo+6k= -github.com/hashicorp/go-plugin v1.5.1/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= +github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= +github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= github.com/hashicorp/go-uuid 
v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.6.1 h1:IGxShH7AVhPaSuSJpKtVi/EFORNjO+OYVJJrAtGG2mY= -github.com/hashicorp/hc-install v0.6.1/go.mod h1:0fW3jpg+wraYSnFDJ6Rlie3RvLf1bIqVIkzoon4KoVE= +github.com/hashicorp/hc-install v0.6.2 h1:V1k+Vraqz4olgZ9UzKiAcbman9i9scg9GgSt/U3mw/M= +github.com/hashicorp/hc-install v0.6.2/go.mod h1:2JBpd+NCFKiHiu/yYCGaPyPHhZLxXTpz8oreHa/a3Ps= github.com/hashicorp/hcl/v2 v2.19.1 h1://i05Jqznmb2EXqa39Nsvyan2o5XyMowW5fnCKW5RPI= github.com/hashicorp/hcl/v2 v2.19.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/terraform-exec v0.19.0 h1:FpqZ6n50Tk95mItTSS9BjeOVUb4eg81SpgVtZNNtFSM= github.com/hashicorp/terraform-exec v0.19.0/go.mod h1:tbxUpe3JKruE9Cuf65mycSIT8KiNPZ0FkuTE3H4urQg= -github.com/hashicorp/terraform-json v0.17.1 h1:eMfvh/uWggKmY7Pmb3T85u86E2EQg6EQHgyRwf3RkyA= -github.com/hashicorp/terraform-json v0.17.1/go.mod h1:Huy6zt6euxaY9knPAFKjUITn8QxUFIe9VuSzb4zn/0o= -github.com/hashicorp/terraform-plugin-go v0.19.0 h1:BuZx/6Cp+lkmiG0cOBk6Zps0Cb2tmqQpDM3iAtnhDQU= -github.com/hashicorp/terraform-plugin-go v0.19.0/go.mod h1:EhRSkEPNoylLQntYsk5KrDHTZJh9HQoumZXbOGOXmec= +github.com/hashicorp/terraform-json v0.18.0 h1:pCjgJEqqDESv4y0Tzdqfxr/edOIGkjs8keY42xfNBwU= +github.com/hashicorp/terraform-json v0.18.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= +github.com/hashicorp/terraform-plugin-go v0.20.0 
h1:oqvoUlL+2EUbKNsJbIt3zqqZ7wi6lzn4ufkn/UA51xQ= +github.com/hashicorp/terraform-plugin-go v0.20.0/go.mod h1:Rr8LBdMlY53a3Z/HpP+ZU3/xCDqtKNCkeI9qOyT10QE= github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0= github.com/hashicorp/terraform-plugin-log v0.9.0/go.mod h1:rKL8egZQ/eXSyDqzLUuwUYLVdlYeamldAHSxjUFADow= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0 h1:X7vB6vn5tON2b49ILa4W7mFAsndeqJ7bZFOGbVO+0Cc= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0/go.mod h1:ydFcxbdj6klCqYEPkPvdvFKiNGKZLUs+896ODUXCyao= -github.com/hashicorp/terraform-registry-address v0.2.2 h1:lPQBg403El8PPicg/qONZJDC6YlgCVbWDtNmmZKtBno= -github.com/hashicorp/terraform-registry-address v0.2.2/go.mod h1:LtwNbCihUoUZ3RYriyS2wF/lGPB6gF9ICLRtuDk7hSo= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0 h1:Bl3e2ei2j/Z3Hc2HIS15Gal2KMKyLAZ2om1HCEvK6es= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0/go.mod h1:i2C41tszDjiWfziPQDL5R/f3Zp0gahXe5No/MIO9rCE= +github.com/hashicorp/terraform-registry-address v0.2.3 h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI= +github.com/hashicorp/terraform-registry-address v0.2.3/go.mod h1:lFHA76T8jfQteVfT7caREqguFrW3c4MFSPhZB7HHgUM= github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= github.com/hashicorp/terraform-svchost v0.1.1/go.mod h1:mNsjQfZyf/Jhz35v6/0LWcv26+X7JPS+buii2c9/ctc= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= +github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jhump/protoreflect v1.15.1 
h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -113,16 +112,15 @@ github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJCpM= +github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaUXK79GlxNBwueZn0xI= github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU= -github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= +github.com/vmihailenco/msgpack/v5 v5.4.1 h1:cQriyiUvjTwOHg8QZaPihLWeRAAVoCpE00IUPn0Bjt8= +github.com/vmihailenco/msgpack/v5 v5.4.1/go.mod h1:GaZTsDaehaPpQVyxrf5mtQlH+pc21PIudVV/E3rRQok= github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g= github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= @@ -134,28 +132,27 @@ 
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA= -golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= +golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= +golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod 
h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= +golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -171,8 +168,8 @@ golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term 
v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= @@ -182,6 +179,7 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= @@ -198,12 +196,12 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19/go.mod h1:66JfowdXAEgad5O9NnYcsNPLCPZJD++2L9X0PCMODrA= -google.golang.org/grpc v1.57.1 h1:upNTNqv0ES+2ZOOqACwVtS3Il8M12/+Hz41RCPzAjQg= -google.golang.org/grpc v1.57.1/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo= +google.golang.org/appengine v1.6.8 
h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= +google.golang.org/grpc v1.60.0 h1:6FQAR0kM31P6MRdeluor2w2gPaS4SVNrD/DNTxrQ15k= +google.golang.org/grpc v1.60.0/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= @@ -212,6 +210,5 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md index ffcfe15431b..3d0379c500e 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/CHANGELOG.md @@ -1,3 +1,26 @@ +## v1.6.0 + +CHANGES: + +* plugin: Plugins written in other languages can optionally start to advertise whether they support gRPC broker multiplexing. 
+ If the environment variable `PLUGIN_MULTIPLEX_GRPC` is set, it is safe to include a seventh field containing a boolean + value in the `|`-separated protocol negotiation line. + +ENHANCEMENTS: + +* Support muxing gRPC broker connections over a single listener [[GH-288](https://github.com/hashicorp/go-plugin/pull/288)] +* client: Configurable buffer size for reading plugin log lines [[GH-265](https://github.com/hashicorp/go-plugin/pull/265)] +* Use `buf` for proto generation [[GH-286](https://github.com/hashicorp/go-plugin/pull/286)] +* deps: bump golang.org/x/net to v0.17.0 [[GH-285](https://github.com/hashicorp/go-plugin/pull/285)] +* deps: bump golang.org/x/sys to v0.13.0 [[GH-285](https://github.com/hashicorp/go-plugin/pull/285)] +* deps: bump golang.org/x/text to v0.13.0 [[GH-285](https://github.com/hashicorp/go-plugin/pull/285)] + +## v1.5.2 + +ENHANCEMENTS: + +client: New `UnixSocketConfig.TempDir` option allows setting the directory to use when creating plugin-specific Unix socket directories [[GH-282](https://github.com/hashicorp/go-plugin/pull/282)] + ## v1.5.1 BUGS: diff --git a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/buf.gen.yaml b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/buf.gen.yaml new file mode 100644 index 00000000000..033d0153b2a --- /dev/null +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/buf.gen.yaml @@ -0,0 +1,14 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +version: v1 +plugins: + - plugin: buf.build/protocolbuffers/go + out: . + opt: + - paths=source_relative + - plugin: buf.build/grpc/go:v1.3.0 + out: . 
+ opt: + - paths=source_relative + - require_unimplemented_servers=false diff --git a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/buf.yaml b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/buf.yaml new file mode 100644 index 00000000000..3d0da4c7199 --- /dev/null +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/buf.yaml @@ -0,0 +1,7 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +version: v1 +build: + excludes: + - examples/ \ No newline at end of file diff --git a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/client.go b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/client.go index b6024afce0e..73f6b35151c 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/client.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/client.go @@ -27,6 +27,7 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-plugin/internal/cmdrunner" + "github.com/hashicorp/go-plugin/internal/grpcmux" "github.com/hashicorp/go-plugin/runner" "google.golang.org/grpc" ) @@ -63,8 +64,18 @@ var ( // ErrSecureConfigAndReattach is returned when both Reattach and // SecureConfig are set. ErrSecureConfigAndReattach = errors.New("only one of Reattach or SecureConfig can be set") + + // ErrGRPCBrokerMuxNotSupported is returned when the client requests + // multiplexing over the gRPC broker, but the plugin does not support the + // feature. In most cases, this should be resolvable by updating and + // rebuilding the plugin, or restarting the plugin with + // ClientConfig.GRPCBrokerMultiplex set to false. + ErrGRPCBrokerMuxNotSupported = errors.New("client requested gRPC broker multiplexing but plugin does not support the feature") ) +// defaultPluginLogBufferSize is the default size of the buffer used to read from stderr for plugin log lines. +const defaultPluginLogBufferSize = 64 * 1024 + // Client handles the lifecycle of a plugin application. 
It launches // plugins, connects to them, dispenses interface implementations, and handles // killing the process. @@ -102,6 +113,9 @@ type Client struct { processKilled bool unixSocketCfg UnixSocketConfig + + grpcMuxerOnce sync.Once + grpcMuxer *grpcmux.GRPCClientMuxer } // NegotiatedVersion returns the protocol version negotiated with the server. @@ -209,6 +223,10 @@ type ClientConfig struct { // it will default to hclog's default logger. Logger hclog.Logger + // PluginLogBufferSize is the buffer size(bytes) to read from stderr for plugin log lines. + // If this is 0, then the default of 64KB is used. + PluginLogBufferSize int + // AutoMTLS has the client and server automatically negotiate mTLS for // transport authentication. This ensures that only the original client will // be allowed to connect to the server, and all other connections will be @@ -237,6 +255,19 @@ type ClientConfig struct { // protocol. GRPCDialOptions []grpc.DialOption + // GRPCBrokerMultiplex turns on multiplexing for the gRPC broker. The gRPC + // broker will multiplex all brokered gRPC servers over the plugin's original + // listener socket instead of making a new listener for each server. The + // go-plugin library currently only includes a Go implementation for the + // server (i.e. plugin) side of gRPC broker multiplexing. + // + // Does not support reattaching. + // + // Multiplexed gRPC streams MUST be established sequentially, i.e. after + // calling AcceptAndServe from one side, wait for the other side to Dial + // before calling AcceptAndServe again. + GRPCBrokerMultiplex bool + // SkipHostEnv allows plugins to run without inheriting the parent process' // environment variables. SkipHostEnv bool @@ -252,16 +283,15 @@ type UnixSocketConfig struct { // client process must be a member of this group or chown will fail. Group string - // The directory to create Unix sockets in. Internally managed by go-plugin - // and deleted when the plugin is killed. 
- directory string -} + // TempDir specifies the base directory to use when creating a plugin-specific + // temporary directory. It is expected to already exist and be writable. If + // not set, defaults to the directory chosen by os.MkdirTemp. + TempDir string -func unixSocketConfigFromEnv() UnixSocketConfig { - return UnixSocketConfig{ - Group: os.Getenv(EnvUnixSocketGroup), - directory: os.Getenv(EnvUnixSocketDir), - } + // The directory to create Unix sockets in. Internally created and managed + // by go-plugin and deleted when the plugin is killed. Will be created + // inside TempDir if specified. + socketDir string } // ReattachConfig is used to configure a client to reattach to an @@ -353,7 +383,7 @@ func CleanupClients() { wg.Wait() } -// Creates a new plugin client which manages the lifecycle of an external +// NewClient creates a new plugin client which manages the lifecycle of an external // plugin and gets the address for the RPC connection. // // The client must be cleaned up at some point by calling Kill(). If @@ -375,10 +405,10 @@ func NewClient(config *ClientConfig) (c *Client) { } if config.SyncStdout == nil { - config.SyncStdout = ioutil.Discard + config.SyncStdout = io.Discard } if config.SyncStderr == nil { - config.SyncStderr = ioutil.Discard + config.SyncStderr = io.Discard } if config.AllowedProtocols == nil { @@ -393,6 +423,10 @@ func NewClient(config *ClientConfig) (c *Client) { }) } + if config.PluginLogBufferSize == 0 { + config.PluginLogBufferSize = defaultPluginLogBufferSize + } + c = &Client{ config: config, logger: config.Logger, @@ -467,7 +501,7 @@ func (c *Client) Kill() { c.l.Lock() runner := c.runner addr := c.address - hostSocketDir := c.unixSocketCfg.directory + hostSocketDir := c.unixSocketCfg.socketDir c.l.Unlock() // If there is no runner or ID, there is nothing to kill. 
@@ -573,6 +607,10 @@ func (c *Client) Start() (addr net.Addr, err error) { if c.config.SecureConfig != nil && c.config.Reattach != nil { return nil, ErrSecureConfigAndReattach } + + if c.config.GRPCBrokerMultiplex && c.config.Reattach != nil { + return nil, fmt.Errorf("gRPC broker multiplexing is not supported with Reattach config") + } } if c.config.Reattach != nil { @@ -604,6 +642,9 @@ func (c *Client) Start() (addr net.Addr, err error) { fmt.Sprintf("PLUGIN_MAX_PORT=%d", c.config.MaxPort), fmt.Sprintf("PLUGIN_PROTOCOL_VERSIONS=%s", strings.Join(versionStrings, ",")), } + if c.config.GRPCBrokerMultiplex { + env = append(env, fmt.Sprintf("%s=true", envMultiplexGRPC)) + } cmd := c.config.Cmd if cmd == nil { @@ -652,7 +693,7 @@ func (c *Client) Start() (addr net.Addr, err error) { } if c.config.UnixSocketConfig != nil { - c.unixSocketCfg.Group = c.config.UnixSocketConfig.Group + c.unixSocketCfg = *c.config.UnixSocketConfig } if c.unixSocketCfg.Group != "" { @@ -662,22 +703,22 @@ func (c *Client) Start() (addr net.Addr, err error) { var runner runner.Runner switch { case c.config.RunnerFunc != nil: - c.unixSocketCfg.directory, err = os.MkdirTemp("", "plugin-dir") + c.unixSocketCfg.socketDir, err = os.MkdirTemp(c.unixSocketCfg.TempDir, "plugin-dir") if err != nil { return nil, err } // os.MkdirTemp creates folders with 0o700, so if we have a group // configured we need to make it group-writable. 
if c.unixSocketCfg.Group != "" { - err = setGroupWritable(c.unixSocketCfg.directory, c.unixSocketCfg.Group, 0o770) + err = setGroupWritable(c.unixSocketCfg.socketDir, c.unixSocketCfg.Group, 0o770) if err != nil { return nil, err } } - cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", EnvUnixSocketDir, c.unixSocketCfg.directory)) - c.logger.Trace("created temporary directory for unix sockets", "dir", c.unixSocketCfg.directory) + cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", EnvUnixSocketDir, c.unixSocketCfg.socketDir)) + c.logger.Trace("created temporary directory for unix sockets", "dir", c.unixSocketCfg.socketDir) - runner, err = c.config.RunnerFunc(c.logger, cmd, c.unixSocketCfg.directory) + runner, err = c.config.RunnerFunc(c.logger, cmd, c.unixSocketCfg.socketDir) if err != nil { return nil, err } @@ -791,7 +832,7 @@ func (c *Client) Start() (addr net.Addr, err error) { // Trim the line and split by "|" in order to get the parts of // the output. line = strings.TrimSpace(line) - parts := strings.SplitN(line, "|", 6) + parts := strings.Split(line, "|") if len(parts) < 4 { errText := fmt.Sprintf("Unrecognized remote plugin message: %s", line) if !ok { @@ -879,6 +920,18 @@ func (c *Client) Start() (addr net.Addr, err error) { return nil, fmt.Errorf("error parsing server cert: %s", err) } } + + if c.config.GRPCBrokerMultiplex && c.protocol == ProtocolGRPC { + if len(parts) <= 6 { + return nil, fmt.Errorf("%w; for Go plugins, you will need to update the "+ + "github.com/hashicorp/go-plugin dependency and recompile", ErrGRPCBrokerMuxNotSupported) + } + if muxSupported, err := strconv.ParseBool(parts[6]); err != nil { + return nil, fmt.Errorf("error parsing %q as a boolean for gRPC broker multiplexing support", parts[6]) + } else if !muxSupported { + return nil, ErrGRPCBrokerMuxNotSupported + } + } } c.address = addr @@ -952,12 +1005,11 @@ func (c *Client) reattach() (net.Addr, error) { if c.config.Reattach.Test { c.negotiatedVersion = c.config.Reattach.ProtocolVersion 
- } - - // If we're in test mode, we do NOT set the process. This avoids the - // process being killed (the only purpose we have for c.process), since - // in test mode the process is responsible for exiting on its own. - if !c.config.Reattach.Test { + } else { + // If we're in test mode, we do NOT set the runner. This avoids the + // runner being killed (the only purpose we have for setting c.runner + // when reattaching), since in test mode the process is responsible for + // exiting on its own. c.runner = r } @@ -1062,11 +1114,24 @@ func netAddrDialer(addr net.Addr) func(string, time.Duration) (net.Conn, error) // dialer is compatible with grpc.WithDialer and creates the connection // to the plugin. func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) { - conn, err := netAddrDialer(c.address)("", timeout) + muxer, err := c.getGRPCMuxer(c.address) if err != nil { return nil, err } + var conn net.Conn + if muxer.Enabled() { + conn, err = muxer.Dial() + if err != nil { + return nil, err + } + } else { + conn, err = netAddrDialer(c.address)("", timeout) + if err != nil { + return nil, err + } + } + // If we have a TLS config we wrap our connection. We only do this // for net/rpc since gRPC uses its own mechanism for TLS. 
if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil { @@ -1076,14 +1141,28 @@ func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) { return conn, nil } -var stdErrBufferSize = 64 * 1024 +func (c *Client) getGRPCMuxer(addr net.Addr) (*grpcmux.GRPCClientMuxer, error) { + if c.protocol != ProtocolGRPC || !c.config.GRPCBrokerMultiplex { + return nil, nil + } + + var err error + c.grpcMuxerOnce.Do(func() { + c.grpcMuxer, err = grpcmux.NewGRPCClientMuxer(c.logger, addr) + }) + if err != nil { + return nil, err + } + + return c.grpcMuxer, nil +} func (c *Client) logStderr(name string, r io.Reader) { defer c.clientWaitGroup.Done() defer c.stderrWaitGroup.Done() l := c.logger.Named(filepath.Base(name)) - reader := bufio.NewReaderSize(r, stdErrBufferSize) + reader := bufio.NewReaderSize(r, c.config.PluginLogBufferSize) // continuation indicates the previous line was a prefix continuation := false diff --git a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/constants.go b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/constants.go index b66fa79993e..e7f5bbe5f7c 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/constants.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/constants.go @@ -4,6 +4,13 @@ package plugin const ( - EnvUnixSocketDir = "PLUGIN_UNIX_SOCKET_DIR" + // EnvUnixSocketDir specifies the directory that _plugins_ should create unix + // sockets in. Does not affect client behavior. + EnvUnixSocketDir = "PLUGIN_UNIX_SOCKET_DIR" + + // EnvUnixSocketGroup specifies the owning, writable group to set for Unix + // sockets created by _plugins_. Does not affect client behavior. 
EnvUnixSocketGroup = "PLUGIN_UNIX_SOCKET_GROUP" + + envMultiplexGRPC = "PLUGIN_MULTIPLEX_GRPC" ) diff --git a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/grpc_broker.go b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/grpc_broker.go index b86561a017f..5b17e37fef0 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/grpc_broker.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/grpc_broker.go @@ -14,6 +14,7 @@ import ( "sync/atomic" "time" + "github.com/hashicorp/go-plugin/internal/grpcmux" "github.com/hashicorp/go-plugin/internal/plugin" "github.com/hashicorp/go-plugin/runner" @@ -40,6 +41,8 @@ type sendErr struct { // connection information to/from the plugin. Implements GRPCBrokerServer and // streamer interfaces. type gRPCBrokerServer struct { + plugin.UnimplementedGRPCBrokerServer + // send is used to send connection info to the gRPC stream. send chan *sendErr @@ -263,29 +266,39 @@ func (s *gRPCBrokerClientImpl) Close() { type GRPCBroker struct { nextId uint32 streamer streamer - streams map[uint32]*gRPCBrokerPending tls *tls.Config doneCh chan struct{} o sync.Once + clientStreams map[uint32]*gRPCBrokerPending + serverStreams map[uint32]*gRPCBrokerPending + unixSocketCfg UnixSocketConfig addrTranslator runner.AddrTranslator + dialMutex sync.Mutex + + muxer grpcmux.GRPCMuxer + sync.Mutex } type gRPCBrokerPending struct { ch chan *plugin.ConnInfo doneCh chan struct{} + once sync.Once } -func newGRPCBroker(s streamer, tls *tls.Config, unixSocketCfg UnixSocketConfig, addrTranslator runner.AddrTranslator) *GRPCBroker { +func newGRPCBroker(s streamer, tls *tls.Config, unixSocketCfg UnixSocketConfig, addrTranslator runner.AddrTranslator, muxer grpcmux.GRPCMuxer) *GRPCBroker { return &GRPCBroker{ streamer: s, - streams: make(map[uint32]*gRPCBrokerPending), tls: tls, doneCh: make(chan struct{}), + clientStreams: make(map[uint32]*gRPCBrokerPending), + serverStreams: make(map[uint32]*gRPCBrokerPending), + muxer: muxer, + 
unixSocketCfg: unixSocketCfg, addrTranslator: addrTranslator, } @@ -295,6 +308,42 @@ func newGRPCBroker(s streamer, tls *tls.Config, unixSocketCfg UnixSocketConfig, // // This should not be called multiple times with the same ID at one time. func (b *GRPCBroker) Accept(id uint32) (net.Listener, error) { + if b.muxer.Enabled() { + p := b.getServerStream(id) + go func() { + err := b.listenForKnocks(id) + if err != nil { + log.Printf("[ERR]: error listening for knocks, id: %d, error: %s", id, err) + } + }() + + ln, err := b.muxer.Listener(id, p.doneCh) + if err != nil { + return nil, err + } + + ln = &rmListener{ + Listener: ln, + close: func() error { + // We could have multiple listeners on the same ID, so use sync.Once + // for closing doneCh to ensure we don't get a panic. + p.once.Do(func() { + close(p.doneCh) + }) + + b.Lock() + defer b.Unlock() + + // No longer need to listen for knocks once the listener is closed. + delete(b.serverStreams, id) + + return nil + }, + } + + return ln, nil + } + listener, err := serverListener(b.unixSocketCfg) if err != nil { return nil, err @@ -327,20 +376,20 @@ func (b *GRPCBroker) Accept(id uint32) (net.Listener, error) { // connection is opened every call, these calls should be used sparingly. // Multiple gRPC server implementations can be registered to a single // AcceptAndServe call. 
-func (b *GRPCBroker) AcceptAndServe(id uint32, s func([]grpc.ServerOption) *grpc.Server) { - listener, err := b.Accept(id) +func (b *GRPCBroker) AcceptAndServe(id uint32, newGRPCServer func([]grpc.ServerOption) *grpc.Server) { + ln, err := b.Accept(id) if err != nil { log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err) return } - defer listener.Close() + defer ln.Close() var opts []grpc.ServerOption if b.tls != nil { opts = []grpc.ServerOption{grpc.Creds(credentials.NewTLS(b.tls))} } - server := s(opts) + server := newGRPCServer(opts) // Here we use a run group to close this goroutine if the server is shutdown // or the broker is shutdown. @@ -348,7 +397,7 @@ func (b *GRPCBroker) AcceptAndServe(id uint32, s func([]grpc.ServerOption) *grpc { // Serve on the listener, if shutting down call GracefulStop. g.Add(func() error { - return server.Serve(listener) + return server.Serve(ln) }, func(err error) { server.GracefulStop() }) @@ -381,12 +430,108 @@ func (b *GRPCBroker) Close() error { return nil } +func (b *GRPCBroker) listenForKnocks(id uint32) error { + p := b.getServerStream(id) + for { + select { + case msg := <-p.ch: + // Shouldn't be possible. + if msg.ServiceId != id { + return fmt.Errorf("knock received with wrong service ID; expected %d but got %d", id, msg.ServiceId) + } + + // Also shouldn't be possible. + if msg.Knock == nil || !msg.Knock.Knock || msg.Knock.Ack { + return fmt.Errorf("knock received for service ID %d with incorrect values; knock=%+v", id, msg.Knock) + } + + // Successful knock, open the door for the given ID. + var ackError string + err := b.muxer.AcceptKnock(id) + if err != nil { + ackError = err.Error() + } + + // Send back an acknowledgement to allow the client to start dialling. 
+ err = b.streamer.Send(&plugin.ConnInfo{ + ServiceId: id, + Knock: &plugin.ConnInfo_Knock{ + Knock: true, + Ack: true, + Error: ackError, + }, + }) + if err != nil { + return fmt.Errorf("error sending back knock acknowledgement: %w", err) + } + case <-p.doneCh: + return nil + } + } +} + +func (b *GRPCBroker) knock(id uint32) error { + // Send a knock. + err := b.streamer.Send(&plugin.ConnInfo{ + ServiceId: id, + Knock: &plugin.ConnInfo_Knock{ + Knock: true, + }, + }) + if err != nil { + return err + } + + // Wait for the ack. + p := b.getClientStream(id) + select { + case msg := <-p.ch: + if msg.ServiceId != id { + return fmt.Errorf("handshake failed for multiplexing on id %d; got response for %d", id, msg.ServiceId) + } + if msg.Knock == nil || !msg.Knock.Knock || !msg.Knock.Ack { + return fmt.Errorf("handshake failed for multiplexing on id %d; expected knock and ack, but got %+v", id, msg.Knock) + } + if msg.Knock.Error != "" { + return fmt.Errorf("failed to knock for id %d: %s", id, msg.Knock.Error) + } + case <-time.After(5 * time.Second): + return fmt.Errorf("timeout waiting for multiplexing knock handshake on id %d", id) + } + + return nil +} + +func (b *GRPCBroker) muxDial(id uint32) func(string, time.Duration) (net.Conn, error) { + return func(string, time.Duration) (net.Conn, error) { + b.dialMutex.Lock() + defer b.dialMutex.Unlock() + + // Tell the other side the listener ID it should give the next stream to. + err := b.knock(id) + if err != nil { + return nil, fmt.Errorf("failed to knock before dialling client: %w", err) + } + + conn, err := b.muxer.Dial() + if err != nil { + return nil, err + } + + return conn, nil + } +} + // Dial opens a connection by ID. 
func (b *GRPCBroker) Dial(id uint32) (conn *grpc.ClientConn, err error) { + if b.muxer.Enabled() { + return dialGRPCConn(b.tls, b.muxDial(id)) + } + var c *plugin.ConnInfo // Open the stream - p := b.getStream(id) + p := b.getClientStream(id) select { case c = <-p.ch: close(p.doneCh) @@ -434,37 +579,63 @@ func (m *GRPCBroker) NextId() uint32 { // the plugin host/client. func (m *GRPCBroker) Run() { for { - stream, err := m.streamer.Recv() + msg, err := m.streamer.Recv() if err != nil { // Once we receive an error, just exit break } // Initialize the waiter - p := m.getStream(stream.ServiceId) + var p *gRPCBrokerPending + if msg.Knock != nil && msg.Knock.Knock && !msg.Knock.Ack { + p = m.getServerStream(msg.ServiceId) + // The server side doesn't close the channel immediately as it needs + // to continuously listen for knocks. + } else { + p = m.getClientStream(msg.ServiceId) + go m.timeoutWait(msg.ServiceId, p) + } select { - case p.ch <- stream: + case p.ch <- msg: default: } + } +} - go m.timeoutWait(stream.ServiceId, p) +// getClientStream is a buffer to receive new connection info and knock acks +// by stream ID. +func (m *GRPCBroker) getClientStream(id uint32) *gRPCBrokerPending { + m.Lock() + defer m.Unlock() + + p, ok := m.clientStreams[id] + if ok { + return p + } + + m.clientStreams[id] = &gRPCBrokerPending{ + ch: make(chan *plugin.ConnInfo, 1), + doneCh: make(chan struct{}), } + return m.clientStreams[id] } -func (m *GRPCBroker) getStream(id uint32) *gRPCBrokerPending { +// getServerStream is a buffer to receive knocks to a multiplexed stream ID +// that its side is listening on. Not used unless multiplexing is enabled. 
+func (m *GRPCBroker) getServerStream(id uint32) *gRPCBrokerPending { m.Lock() defer m.Unlock() - p, ok := m.streams[id] + p, ok := m.serverStreams[id] if ok { return p } - m.streams[id] = &gRPCBrokerPending{ + m.serverStreams[id] = &gRPCBrokerPending{ ch: make(chan *plugin.ConnInfo, 1), doneCh: make(chan struct{}), } - return m.streams[id] + return m.serverStreams[id] } func (m *GRPCBroker) timeoutWait(id uint32, p *gRPCBrokerPending) { @@ -479,5 +650,5 @@ func (m *GRPCBroker) timeoutWait(id uint32, p *gRPCBrokerPending) { defer m.Unlock() // Delete the stream so no one else can grab it - delete(m.streams, id) + delete(m.clientStreams, id) } diff --git a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/grpc_client.go b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/grpc_client.go index 583e42503c8..627649d8394 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/grpc_client.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/grpc_client.go @@ -61,9 +61,14 @@ func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) { return nil, err } + muxer, err := c.getGRPCMuxer(c.address) + if err != nil { + return nil, err + } + // Start the broker. 
brokerGRPCClient := newGRPCBrokerClient(conn) - broker := newGRPCBroker(brokerGRPCClient, c.config.TLSConfig, c.unixSocketCfg, c.runner) + broker := newGRPCBroker(brokerGRPCClient, c.config.TLSConfig, c.unixSocketCfg, c.runner, muxer) go broker.Run() go brokerGRPCClient.StartStream() diff --git a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/grpc_server.go b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/grpc_server.go index 369f958aeea..a5f40c7f06e 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/grpc_server.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/grpc_server.go @@ -12,6 +12,7 @@ import ( "net" hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/grpcmux" "github.com/hashicorp/go-plugin/internal/plugin" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -61,6 +62,8 @@ type GRPCServer struct { stdioServer *grpcStdioServer logger hclog.Logger + + muxer *grpcmux.GRPCServerMuxer } // ServerProtocol impl. @@ -84,7 +87,7 @@ func (s *GRPCServer) Init() error { // Register the broker service brokerServer := newGRPCBrokerServer() plugin.RegisterGRPCBrokerServer(s.server, brokerServer) - s.broker = newGRPCBroker(brokerServer, s.TLS, unixSocketConfigFromEnv(), nil) + s.broker = newGRPCBroker(brokerServer, s.TLS, unixSocketConfigFromEnv(), nil, s.muxer) go s.broker.Run() // Register the controller diff --git a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_client_listener.go b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_client_listener.go new file mode 100644 index 00000000000..e8a3a152a13 --- /dev/null +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_client_listener.go @@ -0,0 +1,51 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package grpcmux + +import ( + "io" + "net" + + "github.com/hashicorp/yamux" +) + +var _ net.Listener = (*blockedClientListener)(nil) + +// blockedClientListener accepts connections for a specific gRPC broker stream +// ID on the client (host) side of the connection. +type blockedClientListener struct { + session *yamux.Session + waitCh chan struct{} + doneCh <-chan struct{} +} + +func newBlockedClientListener(session *yamux.Session, doneCh <-chan struct{}) *blockedClientListener { + return &blockedClientListener{ + waitCh: make(chan struct{}, 1), + doneCh: doneCh, + session: session, + } +} + +func (b *blockedClientListener) Accept() (net.Conn, error) { + select { + case <-b.waitCh: + return b.session.Accept() + case <-b.doneCh: + return nil, io.EOF + } +} + +func (b *blockedClientListener) Addr() net.Addr { + return b.session.Addr() +} + +func (b *blockedClientListener) Close() error { + // We don't close the session, the client muxer is responsible for that. + return nil +} + +func (b *blockedClientListener) unblock() { + b.waitCh <- struct{}{} +} diff --git a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_server_listener.go b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_server_listener.go new file mode 100644 index 00000000000..0edb2c05d26 --- /dev/null +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/blocked_server_listener.go @@ -0,0 +1,49 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package grpcmux + +import ( + "io" + "net" +) + +var _ net.Listener = (*blockedServerListener)(nil) + +// blockedServerListener accepts connections for a specific gRPC broker stream +// ID on the server (plugin) side of the connection. 
+type blockedServerListener struct { + addr net.Addr + acceptCh chan acceptResult + doneCh <-chan struct{} +} + +type acceptResult struct { + conn net.Conn + err error +} + +func newBlockedServerListener(addr net.Addr, doneCh <-chan struct{}) *blockedServerListener { + return &blockedServerListener{ + addr: addr, + acceptCh: make(chan acceptResult), + doneCh: doneCh, + } +} + +func (b *blockedServerListener) Accept() (net.Conn, error) { + select { + case accept := <-b.acceptCh: + return accept.conn, accept.err + case <-b.doneCh: + return nil, io.EOF + } +} + +func (b *blockedServerListener) Addr() net.Addr { + return b.addr +} + +func (b *blockedServerListener) Close() error { + return nil +} diff --git a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_client_muxer.go b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_client_muxer.go new file mode 100644 index 00000000000..b203ba467b2 --- /dev/null +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_client_muxer.go @@ -0,0 +1,105 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package grpcmux + +import ( + "fmt" + "net" + "sync" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/yamux" +) + +var _ GRPCMuxer = (*GRPCClientMuxer)(nil) + +// GRPCClientMuxer implements the client (host) side of the gRPC broker's +// GRPCMuxer interface for multiplexing multiple gRPC broker connections over +// a single net.Conn. +// +// The client dials the initial net.Conn eagerly, and creates a yamux.Session +// as the implementation for multiplexing any additional connections. +// +// Each net.Listener returned from Listener will block until the client receives +// a knock that matches its gRPC broker stream ID. There is no default listener +// on the client, as it is a client for the gRPC broker's control services. (See +// GRPCServerMuxer for more details). 
+type GRPCClientMuxer struct { + logger hclog.Logger + session *yamux.Session + + acceptMutex sync.Mutex + acceptListeners map[uint32]*blockedClientListener +} + +func NewGRPCClientMuxer(logger hclog.Logger, addr net.Addr) (*GRPCClientMuxer, error) { + // Eagerly establish the underlying connection as early as possible. + logger.Debug("making new client mux initial connection", "addr", addr) + conn, err := net.Dial(addr.Network(), addr.String()) + if err != nil { + return nil, err + } + if tcpConn, ok := conn.(*net.TCPConn); ok { + // Make sure to set keep alive so that the connection doesn't die + _ = tcpConn.SetKeepAlive(true) + } + + cfg := yamux.DefaultConfig() + cfg.Logger = logger.Named("yamux").StandardLogger(&hclog.StandardLoggerOptions{ + InferLevels: true, + }) + cfg.LogOutput = nil + sess, err := yamux.Client(conn, cfg) + if err != nil { + return nil, err + } + + logger.Debug("client muxer connected", "addr", addr) + m := &GRPCClientMuxer{ + logger: logger, + session: sess, + acceptListeners: make(map[uint32]*blockedClientListener), + } + + return m, nil +} + +func (m *GRPCClientMuxer) Enabled() bool { + return m != nil +} + +func (m *GRPCClientMuxer) Listener(id uint32, doneCh <-chan struct{}) (net.Listener, error) { + ln := newBlockedClientListener(m.session, doneCh) + + m.acceptMutex.Lock() + m.acceptListeners[id] = ln + m.acceptMutex.Unlock() + + return ln, nil +} + +func (m *GRPCClientMuxer) AcceptKnock(id uint32) error { + m.acceptMutex.Lock() + defer m.acceptMutex.Unlock() + + ln, ok := m.acceptListeners[id] + if !ok { + return fmt.Errorf("no listener for id %d", id) + } + ln.unblock() + return nil +} + +func (m *GRPCClientMuxer) Dial() (net.Conn, error) { + stream, err := m.session.Open() + if err != nil { + return nil, fmt.Errorf("error dialling new client stream: %w", err) + } + + return stream, nil +} + +func (m *GRPCClientMuxer) Close() error { + return m.session.Close() +} diff --git 
a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_muxer.go b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_muxer.go new file mode 100644 index 00000000000..c52aaf553e9 --- /dev/null +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_muxer.go @@ -0,0 +1,41 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package grpcmux + +import ( + "net" +) + +// GRPCMuxer enables multiple implementations of net.Listener to accept +// connections over a single "main" multiplexed net.Conn, and dial multiple +// client connections over the same multiplexed net.Conn. +// +// The first multiplexed connection is used to serve the gRPC broker's own +// control services: plugin.GRPCBroker, plugin.GRPCController, plugin.GRPCStdio. +// +// Clients must "knock" before dialling, to tell the server side that the +// next net.Conn should be accepted onto a specific stream ID. The knock is a +// bidirectional streaming message on the plugin.GRPCBroker service. +type GRPCMuxer interface { + // Enabled determines whether multiplexing should be used. It saves users + // of the interface from having to compare an interface with nil, which + // is a bit awkward to do correctly. + Enabled() bool + + // Listener returns a multiplexed listener that will wait until AcceptKnock + // is called with a matching ID before its Accept function returns. + Listener(id uint32, doneCh <-chan struct{}) (net.Listener, error) + + // AcceptKnock unblocks the listener with the matching ID, and returns an + // error if it hasn't been created yet. + AcceptKnock(id uint32) error + + // Dial makes a new multiplexed client connection. To dial a specific ID, + // a knock must be sent first. + Dial() (net.Conn, error) + + // Close closes connections and releases any resources associated with the + // muxer. 
+ Close() error +} diff --git a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_server_muxer.go b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_server_muxer.go new file mode 100644 index 00000000000..27696ee769d --- /dev/null +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/grpcmux/grpc_server_muxer.go @@ -0,0 +1,190 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package grpcmux + +import ( + "errors" + "fmt" + "net" + "sync" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/yamux" +) + +var _ GRPCMuxer = (*GRPCServerMuxer)(nil) +var _ net.Listener = (*GRPCServerMuxer)(nil) + +// GRPCServerMuxer implements the server (plugin) side of the gRPC broker's +// GRPCMuxer interface for multiplexing multiple gRPC broker connections over +// a single net.Conn. +// +// The server side needs a listener to serve the gRPC broker's control services, +// which includes the service we will receive knocks on. That means we always +// accept the first connection onto a "default" main listener, and if we accept +// any further connections without receiving a knock first, they are also given +// to the default listener. +// +// When creating additional multiplexed listeners for specific stream IDs, we +// can't control the order in which gRPC servers will call Accept() on each +// listener, but we do need to control which gRPC server accepts which connection. +// As such, each multiplexed listener blocks waiting on a channel. It will be +// unblocked when a knock is received for the matching stream ID. 
+type GRPCServerMuxer struct { + addr net.Addr + logger hclog.Logger + + sessionErrCh chan error + sess *yamux.Session + + knockCh chan uint32 + + acceptMutex sync.Mutex + acceptChannels map[uint32]chan acceptResult +} + +func NewGRPCServerMuxer(logger hclog.Logger, ln net.Listener) *GRPCServerMuxer { + m := &GRPCServerMuxer{ + addr: ln.Addr(), + logger: logger, + + sessionErrCh: make(chan error), + + knockCh: make(chan uint32, 1), + acceptChannels: make(map[uint32]chan acceptResult), + } + + go m.acceptSession(ln) + + return m +} + +// acceptSessionAndMuxAccept is responsible for establishing the yamux session, +// and then kicking off the acceptLoop function. +func (m *GRPCServerMuxer) acceptSession(ln net.Listener) { + defer close(m.sessionErrCh) + + m.logger.Debug("accepting initial connection", "addr", m.addr) + conn, err := ln.Accept() + if err != nil { + m.sessionErrCh <- err + return + } + + m.logger.Debug("initial server connection accepted", "addr", m.addr) + cfg := yamux.DefaultConfig() + cfg.Logger = m.logger.Named("yamux").StandardLogger(&hclog.StandardLoggerOptions{ + InferLevels: true, + }) + cfg.LogOutput = nil + m.sess, err = yamux.Server(conn, cfg) + if err != nil { + m.sessionErrCh <- err + return + } +} + +func (m *GRPCServerMuxer) session() (*yamux.Session, error) { + select { + case err := <-m.sessionErrCh: + if err != nil { + return nil, err + } + case <-time.After(5 * time.Second): + return nil, errors.New("timed out waiting for connection to be established") + } + + // Should never happen. + if m.sess == nil { + return nil, errors.New("no connection established and no error received") + } + + return m.sess, nil +} + +// Accept accepts all incoming connections and routes them to the correct +// stream ID based on the most recent knock received. 
+func (m *GRPCServerMuxer) Accept() (net.Conn, error) { + session, err := m.session() + if err != nil { + return nil, fmt.Errorf("error establishing yamux session: %w", err) + } + + for { + conn, acceptErr := session.Accept() + + select { + case id := <-m.knockCh: + m.acceptMutex.Lock() + acceptCh, ok := m.acceptChannels[id] + m.acceptMutex.Unlock() + + if !ok { + if conn != nil { + _ = conn.Close() + } + return nil, fmt.Errorf("received knock on ID %d that doesn't have a listener", id) + } + m.logger.Debug("sending conn to brokered listener", "id", id) + acceptCh <- acceptResult{ + conn: conn, + err: acceptErr, + } + default: + m.logger.Debug("sending conn to default listener") + return conn, acceptErr + } + } +} + +func (m *GRPCServerMuxer) Addr() net.Addr { + return m.addr +} + +func (m *GRPCServerMuxer) Close() error { + session, err := m.session() + if err != nil { + return err + } + + return session.Close() +} + +func (m *GRPCServerMuxer) Enabled() bool { + return m != nil +} + +func (m *GRPCServerMuxer) Listener(id uint32, doneCh <-chan struct{}) (net.Listener, error) { + sess, err := m.session() + if err != nil { + return nil, err + } + + ln := newBlockedServerListener(sess.Addr(), doneCh) + m.acceptMutex.Lock() + m.acceptChannels[id] = ln.acceptCh + m.acceptMutex.Unlock() + + return ln, nil +} + +func (m *GRPCServerMuxer) Dial() (net.Conn, error) { + sess, err := m.session() + if err != nil { + return nil, err + } + + stream, err := sess.OpenStream() + if err != nil { + return nil, fmt.Errorf("error dialling new server stream: %w", err) + } + + return stream, nil +} + +func (m *GRPCServerMuxer) AcceptKnock(id uint32) error { + m.knockCh <- id + return nil +} diff --git a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go deleted file mode 100644 index a3b5fb124e0..00000000000 --- 
a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/gen.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -//go:generate protoc -I ./ ./grpc_broker.proto ./grpc_controller.proto ./grpc_stdio.proto --go_out=plugins=grpc:. - -package plugin diff --git a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go index 303b63e43b1..acc6dc9c77f 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.pb.go @@ -1,203 +1,264 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Code generated by protoc-gen-go. DO NOT EDIT. -// source: grpc_broker.proto +// versions: +// protoc-gen-go v1.31.0 +// protoc (unknown) +// source: internal/plugin/grpc_broker.proto package plugin -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - import ( - context "context" - grpc "google.golang.org/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type ConnInfo struct { - ServiceId uint32 `protobuf:"varint,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` - Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"` - Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServiceId uint32 `protobuf:"varint,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + Network string `protobuf:"bytes,2,opt,name=network,proto3" json:"network,omitempty"` + Address string `protobuf:"bytes,3,opt,name=address,proto3" json:"address,omitempty"` + Knock *ConnInfo_Knock `protobuf:"bytes,4,opt,name=knock,proto3" json:"knock,omitempty"` } -func (m *ConnInfo) Reset() { *m = ConnInfo{} } -func (m *ConnInfo) String() string { return proto.CompactTextString(m) } -func (*ConnInfo) ProtoMessage() {} -func (*ConnInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_grpc_broker_3322b07398605250, []int{0} -} -func (m *ConnInfo) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_ConnInfo.Unmarshal(m, b) -} -func (m *ConnInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_ConnInfo.Marshal(b, m, deterministic) -} -func (dst *ConnInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConnInfo.Merge(dst, src) +func (x *ConnInfo) Reset() { + *x = ConnInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_plugin_grpc_broker_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func (m *ConnInfo) XXX_Size() int { - return xxx_messageInfo_ConnInfo.Size(m) + +func (x *ConnInfo) String() string { + return protoimpl.X.MessageStringOf(x) } -func (m *ConnInfo) 
XXX_DiscardUnknown() { - xxx_messageInfo_ConnInfo.DiscardUnknown(m) + +func (*ConnInfo) ProtoMessage() {} + +func (x *ConnInfo) ProtoReflect() protoreflect.Message { + mi := &file_internal_plugin_grpc_broker_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -var xxx_messageInfo_ConnInfo proto.InternalMessageInfo +// Deprecated: Use ConnInfo.ProtoReflect.Descriptor instead. +func (*ConnInfo) Descriptor() ([]byte, []int) { + return file_internal_plugin_grpc_broker_proto_rawDescGZIP(), []int{0} +} -func (m *ConnInfo) GetServiceId() uint32 { - if m != nil { - return m.ServiceId +func (x *ConnInfo) GetServiceId() uint32 { + if x != nil { + return x.ServiceId } return 0 } -func (m *ConnInfo) GetNetwork() string { - if m != nil { - return m.Network +func (x *ConnInfo) GetNetwork() string { + if x != nil { + return x.Network } return "" } -func (m *ConnInfo) GetAddress() string { - if m != nil { - return m.Address +func (x *ConnInfo) GetAddress() string { + if x != nil { + return x.Address } return "" } -func init() { - proto.RegisterType((*ConnInfo)(nil), "plugin.ConnInfo") +func (x *ConnInfo) GetKnock() *ConnInfo_Knock { + if x != nil { + return x.Knock + } + return nil } -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn +type ConnInfo_Knock struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// GRPCBrokerClient is the client API for GRPCBroker service. 
-// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type GRPCBrokerClient interface { - StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) + Knock bool `protobuf:"varint,1,opt,name=knock,proto3" json:"knock,omitempty"` + Ack bool `protobuf:"varint,2,opt,name=ack,proto3" json:"ack,omitempty"` + Error string `protobuf:"bytes,3,opt,name=error,proto3" json:"error,omitempty"` } -type gRPCBrokerClient struct { - cc *grpc.ClientConn +func (x *ConnInfo_Knock) Reset() { + *x = ConnInfo_Knock{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_plugin_grpc_broker_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -func NewGRPCBrokerClient(cc *grpc.ClientConn) GRPCBrokerClient { - return &gRPCBrokerClient{cc} +func (x *ConnInfo_Knock) String() string { + return protoimpl.X.MessageStringOf(x) } -func (c *gRPCBrokerClient) StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) { - stream, err := c.cc.NewStream(ctx, &_GRPCBroker_serviceDesc.Streams[0], "/plugin.GRPCBroker/StartStream", opts...) - if err != nil { - return nil, err - } - x := &gRPCBrokerStartStreamClient{stream} - return x, nil -} +func (*ConnInfo_Knock) ProtoMessage() {} -type GRPCBroker_StartStreamClient interface { - Send(*ConnInfo) error - Recv() (*ConnInfo, error) - grpc.ClientStream +func (x *ConnInfo_Knock) ProtoReflect() protoreflect.Message { + mi := &file_internal_plugin_grpc_broker_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type gRPCBrokerStartStreamClient struct { - grpc.ClientStream +// Deprecated: Use ConnInfo_Knock.ProtoReflect.Descriptor instead. 
+func (*ConnInfo_Knock) Descriptor() ([]byte, []int) { + return file_internal_plugin_grpc_broker_proto_rawDescGZIP(), []int{0, 0} } -func (x *gRPCBrokerStartStreamClient) Send(m *ConnInfo) error { - return x.ClientStream.SendMsg(m) +func (x *ConnInfo_Knock) GetKnock() bool { + if x != nil { + return x.Knock + } + return false } -func (x *gRPCBrokerStartStreamClient) Recv() (*ConnInfo, error) { - m := new(ConnInfo) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err +func (x *ConnInfo_Knock) GetAck() bool { + if x != nil { + return x.Ack } - return m, nil + return false } -// GRPCBrokerServer is the server API for GRPCBroker service. -type GRPCBrokerServer interface { - StartStream(GRPCBroker_StartStreamServer) error +func (x *ConnInfo_Knock) GetError() string { + if x != nil { + return x.Error + } + return "" } -func RegisterGRPCBrokerServer(s *grpc.Server, srv GRPCBrokerServer) { - s.RegisterService(&_GRPCBroker_serviceDesc, srv) +var File_internal_plugin_grpc_broker_proto protoreflect.FileDescriptor + +var file_internal_plugin_grpc_broker_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x62, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x22, 0xd2, 0x01, 0x0a, 0x08, + 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x09, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, + 0x72, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6e, 0x65, 0x74, 0x77, 0x6f, 0x72, + 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x2c, 0x0a, 0x05, 0x6b, + 0x6e, 0x6f, 
0x63, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4b, 0x6e, 0x6f, + 0x63, 0x6b, 0x52, 0x05, 0x6b, 0x6e, 0x6f, 0x63, 0x6b, 0x1a, 0x45, 0x0a, 0x05, 0x4b, 0x6e, 0x6f, + 0x63, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x6b, 0x6e, 0x6f, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x05, 0x6b, 0x6e, 0x6f, 0x63, 0x6b, 0x12, 0x10, 0x0a, 0x03, 0x61, 0x63, 0x6b, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x61, 0x63, 0x6b, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x32, 0x43, 0x0a, 0x0a, 0x47, 0x52, 0x50, 0x43, 0x42, 0x72, 0x6f, 0x6b, 0x65, 0x72, 0x12, 0x35, + 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x72, 0x74, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, 0x10, 0x2e, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x1a, + 0x10, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x49, 0x6e, 0x66, + 0x6f, 0x28, 0x01, 0x30, 0x01, 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -func _GRPCBroker_StartStream_Handler(srv interface{}, stream grpc.ServerStream) error { - return srv.(GRPCBrokerServer).StartStream(&gRPCBrokerStartStreamServer{stream}) -} +var ( + file_internal_plugin_grpc_broker_proto_rawDescOnce sync.Once + file_internal_plugin_grpc_broker_proto_rawDescData = file_internal_plugin_grpc_broker_proto_rawDesc +) -type GRPCBroker_StartStreamServer interface { - Send(*ConnInfo) error - Recv() (*ConnInfo, error) - grpc.ServerStream +func file_internal_plugin_grpc_broker_proto_rawDescGZIP() []byte { + file_internal_plugin_grpc_broker_proto_rawDescOnce.Do(func() { + file_internal_plugin_grpc_broker_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_plugin_grpc_broker_proto_rawDescData) + }) + return 
file_internal_plugin_grpc_broker_proto_rawDescData } -type gRPCBrokerStartStreamServer struct { - grpc.ServerStream +var file_internal_plugin_grpc_broker_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_internal_plugin_grpc_broker_proto_goTypes = []interface{}{ + (*ConnInfo)(nil), // 0: plugin.ConnInfo + (*ConnInfo_Knock)(nil), // 1: plugin.ConnInfo.Knock } - -func (x *gRPCBrokerStartStreamServer) Send(m *ConnInfo) error { - return x.ServerStream.SendMsg(m) +var file_internal_plugin_grpc_broker_proto_depIdxs = []int32{ + 1, // 0: plugin.ConnInfo.knock:type_name -> plugin.ConnInfo.Knock + 0, // 1: plugin.GRPCBroker.StartStream:input_type -> plugin.ConnInfo + 0, // 2: plugin.GRPCBroker.StartStream:output_type -> plugin.ConnInfo + 2, // [2:3] is the sub-list for method output_type + 1, // [1:2] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name } -func (x *gRPCBrokerStartStreamServer) Recv() (*ConnInfo, error) { - m := new(ConnInfo) - if err := x.ServerStream.RecvMsg(m); err != nil { - return nil, err +func init() { file_internal_plugin_grpc_broker_proto_init() } +func file_internal_plugin_grpc_broker_proto_init() { + if File_internal_plugin_grpc_broker_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_plugin_grpc_broker_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConnInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_internal_plugin_grpc_broker_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConnInfo_Knock); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } - return m, nil -} - -var _GRPCBroker_serviceDesc = grpc.ServiceDesc{ - 
ServiceName: "plugin.GRPCBroker", - HandlerType: (*GRPCBrokerServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "StartStream", - Handler: _GRPCBroker_StartStream_Handler, - ServerStreams: true, - ClientStreams: true, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_plugin_grpc_broker_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, }, - }, - Metadata: "grpc_broker.proto", -} - -func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor_grpc_broker_3322b07398605250) } - -var fileDescriptor_grpc_broker_3322b07398605250 = []byte{ - // 175 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48, - 0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, - 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x8a, 0xe5, 0xe2, 0x70, 0xce, 0xcf, 0xcb, 0xf3, 0xcc, 0x4b, - 0xcb, 0x17, 0x92, 0xe5, 0xe2, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x8d, 0xcf, 0x4c, 0x91, - 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x84, 0x8a, 0x78, 0xa6, 0x08, 0x49, 0x70, 0xb1, 0xe7, - 0xa5, 0x96, 0x94, 0xe7, 0x17, 0x65, 0x4b, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x20, - 0x99, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x09, 0x66, 0x88, 0x0c, 0x94, 0x6b, 0xe4, 0xcc, - 0xc5, 0xe5, 0x1e, 0x14, 0xe0, 0xec, 0x04, 0xb6, 0x5a, 0xc8, 0x94, 0x8b, 0x3b, 0xb8, 0x24, 0xb1, - 0xa8, 0x24, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x57, 0x48, 0x40, 0x0f, 0xe2, 0x08, 0x3d, 0x98, 0x0b, - 0xa4, 0x30, 0x44, 0x34, 0x18, 0x0d, 0x18, 0x9d, 0x38, 0xa2, 0xa0, 0xae, 0x4d, 0x62, 0x03, 0x3b, - 0xde, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x10, 0x15, 0x39, 0x47, 0xd1, 0x00, 0x00, 0x00, + GoTypes: file_internal_plugin_grpc_broker_proto_goTypes, + DependencyIndexes: file_internal_plugin_grpc_broker_proto_depIdxs, + MessageInfos: 
file_internal_plugin_grpc_broker_proto_msgTypes, + }.Build() + File_internal_plugin_grpc_broker_proto = out.File + file_internal_plugin_grpc_broker_proto_rawDesc = nil + file_internal_plugin_grpc_broker_proto_goTypes = nil + file_internal_plugin_grpc_broker_proto_depIdxs = nil } diff --git a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto index 038423ded7a..c92cd645cb6 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker.proto @@ -3,12 +3,18 @@ syntax = "proto3"; package plugin; -option go_package = "plugin"; +option go_package = "./plugin"; message ConnInfo { uint32 service_id = 1; string network = 2; string address = 3; + message Knock { + bool knock = 1; + bool ack = 2; + string error = 3; + } + Knock knock = 4; } service GRPCBroker { diff --git a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker_grpc.pb.go b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker_grpc.pb.go new file mode 100644 index 00000000000..1b0f80705d8 --- /dev/null +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_broker_grpc.pb.go @@ -0,0 +1,142 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: internal/plugin/grpc_broker.proto + +package plugin + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. 
+const _ = grpc.SupportPackageIsVersion7 + +const ( + GRPCBroker_StartStream_FullMethodName = "/plugin.GRPCBroker/StartStream" +) + +// GRPCBrokerClient is the client API for GRPCBroker service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type GRPCBrokerClient interface { + StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) +} + +type gRPCBrokerClient struct { + cc grpc.ClientConnInterface +} + +func NewGRPCBrokerClient(cc grpc.ClientConnInterface) GRPCBrokerClient { + return &gRPCBrokerClient{cc} +} + +func (c *gRPCBrokerClient) StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) { + stream, err := c.cc.NewStream(ctx, &GRPCBroker_ServiceDesc.Streams[0], GRPCBroker_StartStream_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &gRPCBrokerStartStreamClient{stream} + return x, nil +} + +type GRPCBroker_StartStreamClient interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ClientStream +} + +type gRPCBrokerStartStreamClient struct { + grpc.ClientStream +} + +func (x *gRPCBrokerStartStreamClient) Send(m *ConnInfo) error { + return x.ClientStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamClient) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GRPCBrokerServer is the server API for GRPCBroker service. +// All implementations should embed UnimplementedGRPCBrokerServer +// for forward compatibility +type GRPCBrokerServer interface { + StartStream(GRPCBroker_StartStreamServer) error +} + +// UnimplementedGRPCBrokerServer should be embedded to have forward compatible implementations. 
+type UnimplementedGRPCBrokerServer struct { +} + +func (UnimplementedGRPCBrokerServer) StartStream(GRPCBroker_StartStreamServer) error { + return status.Errorf(codes.Unimplemented, "method StartStream not implemented") +} + +// UnsafeGRPCBrokerServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to GRPCBrokerServer will +// result in compilation errors. +type UnsafeGRPCBrokerServer interface { + mustEmbedUnimplementedGRPCBrokerServer() +} + +func RegisterGRPCBrokerServer(s grpc.ServiceRegistrar, srv GRPCBrokerServer) { + s.RegisterService(&GRPCBroker_ServiceDesc, srv) +} + +func _GRPCBroker_StartStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(GRPCBrokerServer).StartStream(&gRPCBrokerStartStreamServer{stream}) +} + +type GRPCBroker_StartStreamServer interface { + Send(*ConnInfo) error + Recv() (*ConnInfo, error) + grpc.ServerStream +} + +type gRPCBrokerStartStreamServer struct { + grpc.ServerStream +} + +func (x *gRPCBrokerStartStreamServer) Send(m *ConnInfo) error { + return x.ServerStream.SendMsg(m) +} + +func (x *gRPCBrokerStartStreamServer) Recv() (*ConnInfo, error) { + m := new(ConnInfo) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GRPCBroker_ServiceDesc is the grpc.ServiceDesc for GRPCBroker service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var GRPCBroker_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCBroker", + HandlerType: (*GRPCBrokerServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StartStream", + Handler: _GRPCBroker_StartStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "internal/plugin/grpc_broker.proto", +} diff --git a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go index 982fca0a574..8ca48e0d92d 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.pb.go @@ -1,145 +1,141 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Code generated by protoc-gen-go. DO NOT EDIT. -// source: grpc_controller.proto +// versions: +// protoc-gen-go v1.31.0 +// protoc (unknown) +// source: internal/plugin/grpc_controller.proto package plugin -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - import ( - context "context" - grpc "google.golang.org/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type Empty struct { - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } -func (m *Empty) Reset() { *m = Empty{} } -func (m *Empty) String() string { return proto.CompactTextString(m) } -func (*Empty) ProtoMessage() {} -func (*Empty) Descriptor() ([]byte, []int) { - return fileDescriptor_grpc_controller_08f8296ef6d80436, []int{0} -} -func (m *Empty) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Empty.Unmarshal(m, b) -} -func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Empty.Marshal(b, m, deterministic) -} -func (dst *Empty) XXX_Merge(src proto.Message) { - xxx_messageInfo_Empty.Merge(dst, src) -} -func (m *Empty) XXX_Size() int { - return xxx_messageInfo_Empty.Size(m) -} -func (m *Empty) XXX_DiscardUnknown() { - xxx_messageInfo_Empty.DiscardUnknown(m) +func (x *Empty) Reset() { + *x = Empty{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_plugin_grpc_controller_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } } -var xxx_messageInfo_Empty proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Empty)(nil), "plugin.Empty") +func (x *Empty) String() string { + return protoimpl.X.MessageStringOf(x) } -// Reference imports to suppress errors if they are not otherwise used. 
-var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 +func (*Empty) ProtoMessage() {} -// GRPCControllerClient is the client API for GRPCController service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type GRPCControllerClient interface { - Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) +func (x *Empty) ProtoReflect() protoreflect.Message { + mi := &file_internal_plugin_grpc_controller_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) } -type gRPCControllerClient struct { - cc *grpc.ClientConn +// Deprecated: Use Empty.ProtoReflect.Descriptor instead. +func (*Empty) Descriptor() ([]byte, []int) { + return file_internal_plugin_grpc_controller_proto_rawDescGZIP(), []int{0} } -func NewGRPCControllerClient(cc *grpc.ClientConn) GRPCControllerClient { - return &gRPCControllerClient{cc} -} +var File_internal_plugin_grpc_controller_proto protoreflect.FileDescriptor -func (c *gRPCControllerClient) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { - out := new(Empty) - err := c.cc.Invoke(ctx, "/plugin.GRPCController/Shutdown", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil +var file_internal_plugin_grpc_controller_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, + 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x22, + 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x32, 0x3a, 0x0a, 0x0e, 0x47, 0x52, 0x50, 0x43, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x28, 0x0a, 0x08, 0x53, 0x68, + 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x12, 0x0d, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x0d, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } -// GRPCControllerServer is the server API for GRPCController service. 
-type GRPCControllerServer interface { - Shutdown(context.Context, *Empty) (*Empty, error) +var ( + file_internal_plugin_grpc_controller_proto_rawDescOnce sync.Once + file_internal_plugin_grpc_controller_proto_rawDescData = file_internal_plugin_grpc_controller_proto_rawDesc +) + +func file_internal_plugin_grpc_controller_proto_rawDescGZIP() []byte { + file_internal_plugin_grpc_controller_proto_rawDescOnce.Do(func() { + file_internal_plugin_grpc_controller_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_plugin_grpc_controller_proto_rawDescData) + }) + return file_internal_plugin_grpc_controller_proto_rawDescData } -func RegisterGRPCControllerServer(s *grpc.Server, srv GRPCControllerServer) { - s.RegisterService(&_GRPCController_serviceDesc, srv) +var file_internal_plugin_grpc_controller_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_internal_plugin_grpc_controller_proto_goTypes = []interface{}{ + (*Empty)(nil), // 0: plugin.Empty +} +var file_internal_plugin_grpc_controller_proto_depIdxs = []int32{ + 0, // 0: plugin.GRPCController.Shutdown:input_type -> plugin.Empty + 0, // 1: plugin.GRPCController.Shutdown:output_type -> plugin.Empty + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name } -func _GRPCController_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(GRPCControllerServer).Shutdown(ctx, in) +func init() { file_internal_plugin_grpc_controller_proto_init() } +func file_internal_plugin_grpc_controller_proto_init() { + if File_internal_plugin_grpc_controller_proto != nil { + return } - info := &grpc.UnaryServerInfo{ - 
Server: srv, - FullMethod: "/plugin.GRPCController/Shutdown", + if !protoimpl.UnsafeEnabled { + file_internal_plugin_grpc_controller_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Empty); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(GRPCControllerServer).Shutdown(ctx, req.(*Empty)) - } - return interceptor(ctx, in, info, handler) -} - -var _GRPCController_serviceDesc = grpc.ServiceDesc{ - ServiceName: "plugin.GRPCController", - HandlerType: (*GRPCControllerServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Shutdown", - Handler: _GRPCController_Shutdown_Handler, + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_plugin_grpc_controller_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 1, }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "grpc_controller.proto", -} - -func init() { - proto.RegisterFile("grpc_controller.proto", fileDescriptor_grpc_controller_08f8296ef6d80436) -} - -var fileDescriptor_grpc_controller_08f8296ef6d80436 = []byte{ - // 108 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4d, 0x2f, 0x2a, 0x48, - 0x8e, 0x4f, 0xce, 0xcf, 0x2b, 0x29, 0xca, 0xcf, 0xc9, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, - 0xc9, 0x17, 0x62, 0x2b, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x62, 0xe7, 0x62, 0x75, 0xcd, 0x2d, - 0x28, 0xa9, 0x34, 0xb2, 0xe2, 0xe2, 0x73, 0x0f, 0x0a, 0x70, 0x76, 0x86, 0x2b, 0x14, 0xd2, 0xe0, - 0xe2, 0x08, 0xce, 0x28, 0x2d, 0x49, 0xc9, 0x2f, 0xcf, 0x13, 0xe2, 0xd5, 0x83, 0xa8, 0xd7, 0x03, - 0x2b, 0x96, 0x42, 0xe5, 0x3a, 0x71, 0x44, 0x41, 0x8d, 0x4b, 0x62, 0x03, 0x9b, 0x6e, 0x0c, 0x08, - 0x00, 0x00, 0xff, 0xff, 0xab, 
0x7c, 0x27, 0xe5, 0x76, 0x00, 0x00, 0x00, + GoTypes: file_internal_plugin_grpc_controller_proto_goTypes, + DependencyIndexes: file_internal_plugin_grpc_controller_proto_depIdxs, + MessageInfos: file_internal_plugin_grpc_controller_proto_msgTypes, + }.Build() + File_internal_plugin_grpc_controller_proto = out.File + file_internal_plugin_grpc_controller_proto_rawDesc = nil + file_internal_plugin_grpc_controller_proto_goTypes = nil + file_internal_plugin_grpc_controller_proto_depIdxs = nil } diff --git a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto index 3157eb885de..2755fa638b5 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller.proto @@ -3,7 +3,7 @@ syntax = "proto3"; package plugin; -option go_package = "plugin"; +option go_package = "./plugin"; message Empty { } diff --git a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller_grpc.pb.go b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller_grpc.pb.go new file mode 100644 index 00000000000..427611aa00f --- /dev/null +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_controller_grpc.pb.go @@ -0,0 +1,110 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: internal/plugin/grpc_controller.proto + +package plugin + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + GRPCController_Shutdown_FullMethodName = "/plugin.GRPCController/Shutdown" +) + +// GRPCControllerClient is the client API for GRPCController service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type GRPCControllerClient interface { + Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) +} + +type gRPCControllerClient struct { + cc grpc.ClientConnInterface +} + +func NewGRPCControllerClient(cc grpc.ClientConnInterface) GRPCControllerClient { + return &gRPCControllerClient{cc} +} + +func (c *gRPCControllerClient) Shutdown(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*Empty, error) { + out := new(Empty) + err := c.cc.Invoke(ctx, GRPCController_Shutdown_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// GRPCControllerServer is the server API for GRPCController service. +// All implementations should embed UnimplementedGRPCControllerServer +// for forward compatibility +type GRPCControllerServer interface { + Shutdown(context.Context, *Empty) (*Empty, error) +} + +// UnimplementedGRPCControllerServer should be embedded to have forward compatible implementations. 
+type UnimplementedGRPCControllerServer struct { +} + +func (UnimplementedGRPCControllerServer) Shutdown(context.Context, *Empty) (*Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method Shutdown not implemented") +} + +// UnsafeGRPCControllerServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to GRPCControllerServer will +// result in compilation errors. +type UnsafeGRPCControllerServer interface { + mustEmbedUnimplementedGRPCControllerServer() +} + +func RegisterGRPCControllerServer(s grpc.ServiceRegistrar, srv GRPCControllerServer) { + s.RegisterService(&GRPCController_ServiceDesc, srv) +} + +func _GRPCController_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(GRPCControllerServer).Shutdown(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: GRPCController_Shutdown_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(GRPCControllerServer).Shutdown(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +// GRPCController_ServiceDesc is the grpc.ServiceDesc for GRPCController service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var GRPCController_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCController", + HandlerType: (*GRPCControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Shutdown", + Handler: _GRPCController_Shutdown_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "internal/plugin/grpc_controller.proto", +} diff --git a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go index bdef71b8aa0..139cbb4a90b 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.pb.go @@ -1,28 +1,28 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + // Code generated by protoc-gen-go. DO NOT EDIT. -// source: grpc_stdio.proto +// versions: +// protoc-gen-go v1.31.0 +// protoc (unknown) +// source: internal/plugin/grpc_stdio.proto package plugin -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import empty "github.com/golang/protobuf/ptypes/empty" - import ( - context "context" - grpc "google.golang.org/grpc" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + reflect "reflect" + sync "sync" ) -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) type StdioData_Channel int32 @@ -32,202 +32,194 @@ const ( StdioData_STDERR StdioData_Channel = 2 ) -var StdioData_Channel_name = map[int32]string{ - 0: "INVALID", - 1: "STDOUT", - 2: "STDERR", -} -var StdioData_Channel_value = map[string]int32{ - "INVALID": 0, - "STDOUT": 1, - "STDERR": 2, -} - -func (x StdioData_Channel) String() string { - return proto.EnumName(StdioData_Channel_name, int32(x)) -} -func (StdioData_Channel) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_grpc_stdio_db2934322ca63bd5, []int{0, 0} -} +// Enum value maps for StdioData_Channel. +var ( + StdioData_Channel_name = map[int32]string{ + 0: "INVALID", + 1: "STDOUT", + 2: "STDERR", + } + StdioData_Channel_value = map[string]int32{ + "INVALID": 0, + "STDOUT": 1, + "STDERR": 2, + } +) -// StdioData is a single chunk of stdout or stderr data that is streamed -// from GRPCStdio. 
-type StdioData struct { - Channel StdioData_Channel `protobuf:"varint,1,opt,name=channel,proto3,enum=plugin.StdioData_Channel" json:"channel,omitempty"` - Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` +func (x StdioData_Channel) Enum() *StdioData_Channel { + p := new(StdioData_Channel) + *p = x + return p } -func (m *StdioData) Reset() { *m = StdioData{} } -func (m *StdioData) String() string { return proto.CompactTextString(m) } -func (*StdioData) ProtoMessage() {} -func (*StdioData) Descriptor() ([]byte, []int) { - return fileDescriptor_grpc_stdio_db2934322ca63bd5, []int{0} -} -func (m *StdioData) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_StdioData.Unmarshal(m, b) -} -func (m *StdioData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_StdioData.Marshal(b, m, deterministic) -} -func (dst *StdioData) XXX_Merge(src proto.Message) { - xxx_messageInfo_StdioData.Merge(dst, src) -} -func (m *StdioData) XXX_Size() int { - return xxx_messageInfo_StdioData.Size(m) -} -func (m *StdioData) XXX_DiscardUnknown() { - xxx_messageInfo_StdioData.DiscardUnknown(m) +func (x StdioData_Channel) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -var xxx_messageInfo_StdioData proto.InternalMessageInfo - -func (m *StdioData) GetChannel() StdioData_Channel { - if m != nil { - return m.Channel - } - return StdioData_INVALID +func (StdioData_Channel) Descriptor() protoreflect.EnumDescriptor { + return file_internal_plugin_grpc_stdio_proto_enumTypes[0].Descriptor() } -func (m *StdioData) GetData() []byte { - if m != nil { - return m.Data - } - return nil +func (StdioData_Channel) Type() protoreflect.EnumType { + return &file_internal_plugin_grpc_stdio_proto_enumTypes[0] } -func init() { - proto.RegisterType((*StdioData)(nil), "plugin.StdioData") - 
proto.RegisterEnum("plugin.StdioData_Channel", StdioData_Channel_name, StdioData_Channel_value) +func (x StdioData_Channel) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) } -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// GRPCStdioClient is the client API for GRPCStdio service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type GRPCStdioClient interface { - // StreamStdio returns a stream that contains all the stdout/stderr. - // This RPC endpoint must only be called ONCE. Once stdio data is consumed - // it is not sent again. - // - // Callers should connect early to prevent blocking on the plugin process. - StreamStdio(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) +// Deprecated: Use StdioData_Channel.Descriptor instead. +func (StdioData_Channel) EnumDescriptor() ([]byte, []int) { + return file_internal_plugin_grpc_stdio_proto_rawDescGZIP(), []int{0, 0} } -type gRPCStdioClient struct { - cc *grpc.ClientConn -} +// StdioData is a single chunk of stdout or stderr data that is streamed +// from GRPCStdio. 
+type StdioData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields -func NewGRPCStdioClient(cc *grpc.ClientConn) GRPCStdioClient { - return &gRPCStdioClient{cc} + Channel StdioData_Channel `protobuf:"varint,1,opt,name=channel,proto3,enum=plugin.StdioData_Channel" json:"channel,omitempty"` + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` } -func (c *gRPCStdioClient) StreamStdio(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) { - stream, err := c.cc.NewStream(ctx, &_GRPCStdio_serviceDesc.Streams[0], "/plugin.GRPCStdio/StreamStdio", opts...) - if err != nil { - return nil, err - } - x := &gRPCStdioStreamStdioClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err +func (x *StdioData) Reset() { + *x = StdioData{} + if protoimpl.UnsafeEnabled { + mi := &file_internal_plugin_grpc_stdio_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return x, nil } -type GRPCStdio_StreamStdioClient interface { - Recv() (*StdioData, error) - grpc.ClientStream +func (x *StdioData) String() string { + return protoimpl.X.MessageStringOf(x) } -type gRPCStdioStreamStdioClient struct { - grpc.ClientStream -} +func (*StdioData) ProtoMessage() {} -func (x *gRPCStdioStreamStdioClient) Recv() (*StdioData, error) { - m := new(StdioData) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err +func (x *StdioData) ProtoReflect() protoreflect.Message { + mi := &file_internal_plugin_grpc_stdio_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return m, nil -} - -// GRPCStdioServer is the server API for GRPCStdio service. 
-type GRPCStdioServer interface { - // StreamStdio returns a stream that contains all the stdout/stderr. - // This RPC endpoint must only be called ONCE. Once stdio data is consumed - // it is not sent again. - // - // Callers should connect early to prevent blocking on the plugin process. - StreamStdio(*empty.Empty, GRPCStdio_StreamStdioServer) error + return mi.MessageOf(x) } -func RegisterGRPCStdioServer(s *grpc.Server, srv GRPCStdioServer) { - s.RegisterService(&_GRPCStdio_serviceDesc, srv) +// Deprecated: Use StdioData.ProtoReflect.Descriptor instead. +func (*StdioData) Descriptor() ([]byte, []int) { + return file_internal_plugin_grpc_stdio_proto_rawDescGZIP(), []int{0} } -func _GRPCStdio_StreamStdio_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(empty.Empty) - if err := stream.RecvMsg(m); err != nil { - return err +func (x *StdioData) GetChannel() StdioData_Channel { + if x != nil { + return x.Channel } - return srv.(GRPCStdioServer).StreamStdio(m, &gRPCStdioStreamStdioServer{stream}) -} - -type GRPCStdio_StreamStdioServer interface { - Send(*StdioData) error - grpc.ServerStream + return StdioData_INVALID } -type gRPCStdioStreamStdioServer struct { - grpc.ServerStream +func (x *StdioData) GetData() []byte { + if x != nil { + return x.Data + } + return nil } -func (x *gRPCStdioStreamStdioServer) Send(m *StdioData) error { - return x.ServerStream.SendMsg(m) -} +var File_internal_plugin_grpc_stdio_proto protoreflect.FileDescriptor + +var file_internal_plugin_grpc_stdio_proto_rawDesc = []byte{ + 0x0a, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x73, 0x74, 0x64, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x12, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x84, 0x01, 
0x0a, 0x09, 0x53, 0x74, 0x64, 0x69, + 0x6f, 0x44, 0x61, 0x74, 0x61, 0x12, 0x33, 0x0a, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, + 0x53, 0x74, 0x64, 0x69, 0x6f, 0x44, 0x61, 0x74, 0x61, 0x2e, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, + 0x6c, 0x52, 0x07, 0x63, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x2e, + 0x0a, 0x07, 0x43, 0x68, 0x61, 0x6e, 0x6e, 0x65, 0x6c, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, + 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x44, 0x4f, 0x55, 0x54, + 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x54, 0x44, 0x45, 0x52, 0x52, 0x10, 0x02, 0x32, 0x47, + 0x0a, 0x09, 0x47, 0x52, 0x50, 0x43, 0x53, 0x74, 0x64, 0x69, 0x6f, 0x12, 0x3a, 0x0a, 0x0b, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x53, 0x74, 0x64, 0x69, 0x6f, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, + 0x74, 0x79, 0x1a, 0x11, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x53, 0x74, 0x64, 0x69, + 0x6f, 0x44, 0x61, 0x74, 0x61, 0x30, 0x01, 0x42, 0x0a, 0x5a, 0x08, 0x2e, 0x2f, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_internal_plugin_grpc_stdio_proto_rawDescOnce sync.Once + file_internal_plugin_grpc_stdio_proto_rawDescData = file_internal_plugin_grpc_stdio_proto_rawDesc +) -var _GRPCStdio_serviceDesc = grpc.ServiceDesc{ - ServiceName: "plugin.GRPCStdio", - HandlerType: (*GRPCStdioServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{ - { - StreamName: "StreamStdio", - Handler: _GRPCStdio_StreamStdio_Handler, - ServerStreams: true, +func file_internal_plugin_grpc_stdio_proto_rawDescGZIP() []byte { + file_internal_plugin_grpc_stdio_proto_rawDescOnce.Do(func() { + 
file_internal_plugin_grpc_stdio_proto_rawDescData = protoimpl.X.CompressGZIP(file_internal_plugin_grpc_stdio_proto_rawDescData) + }) + return file_internal_plugin_grpc_stdio_proto_rawDescData +} + +var file_internal_plugin_grpc_stdio_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_internal_plugin_grpc_stdio_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_internal_plugin_grpc_stdio_proto_goTypes = []interface{}{ + (StdioData_Channel)(0), // 0: plugin.StdioData.Channel + (*StdioData)(nil), // 1: plugin.StdioData + (*emptypb.Empty)(nil), // 2: google.protobuf.Empty +} +var file_internal_plugin_grpc_stdio_proto_depIdxs = []int32{ + 0, // 0: plugin.StdioData.channel:type_name -> plugin.StdioData.Channel + 2, // 1: plugin.GRPCStdio.StreamStdio:input_type -> google.protobuf.Empty + 1, // 2: plugin.GRPCStdio.StreamStdio:output_type -> plugin.StdioData + 2, // [2:3] is the sub-list for method output_type + 1, // [1:2] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_internal_plugin_grpc_stdio_proto_init() } +func file_internal_plugin_grpc_stdio_proto_init() { + if File_internal_plugin_grpc_stdio_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_internal_plugin_grpc_stdio_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StdioData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_internal_plugin_grpc_stdio_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 1, }, - }, - Metadata: "grpc_stdio.proto", -} - -func init() { proto.RegisterFile("grpc_stdio.proto", 
fileDescriptor_grpc_stdio_db2934322ca63bd5) } - -var fileDescriptor_grpc_stdio_db2934322ca63bd5 = []byte{ - // 221 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x48, 0x2f, 0x2a, 0x48, - 0x8e, 0x2f, 0x2e, 0x49, 0xc9, 0xcc, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b, 0xc8, - 0x29, 0x4d, 0xcf, 0xcc, 0x93, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x07, 0x8b, 0x26, - 0x95, 0xa6, 0xe9, 0xa7, 0xe6, 0x16, 0x94, 0x54, 0x42, 0x14, 0x29, 0xb5, 0x30, 0x72, 0x71, 0x06, - 0x83, 0x34, 0xb9, 0x24, 0x96, 0x24, 0x0a, 0x19, 0x73, 0xb1, 0x27, 0x67, 0x24, 0xe6, 0xe5, 0xa5, - 0xe6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0xf0, 0x19, 0x49, 0xea, 0x41, 0x0c, 0xd1, 0x83, 0xab, 0xd1, - 0x73, 0x86, 0x28, 0x08, 0x82, 0xa9, 0x14, 0x12, 0xe2, 0x62, 0x49, 0x49, 0x2c, 0x49, 0x94, 0x60, - 0x52, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0xb3, 0x95, 0xf4, 0xb8, 0xd8, 0xa1, 0xea, 0x84, 0xb8, 0xb9, - 0xd8, 0x3d, 0xfd, 0xc2, 0x1c, 0x7d, 0x3c, 0x5d, 0x04, 0x18, 0x84, 0xb8, 0xb8, 0xd8, 0x82, 0x43, - 0x5c, 0xfc, 0x43, 0x43, 0x04, 0x18, 0xa1, 0x6c, 0xd7, 0xa0, 0x20, 0x01, 0x26, 0x23, 0x77, 0x2e, - 0x4e, 0xf7, 0xa0, 0x00, 0x67, 0xb0, 0x2d, 0x42, 0x56, 0x5c, 0xdc, 0xc1, 0x25, 0x45, 0xa9, 0x89, - 0xb9, 0x10, 0xae, 0x98, 0x1e, 0xc4, 0x03, 0x7a, 0x30, 0x0f, 0xe8, 0xb9, 0x82, 0x3c, 0x20, 0x25, - 0x88, 0xe1, 0x36, 0x03, 0x46, 0x27, 0x8e, 0x28, 0xa8, 0xb7, 0x93, 0xd8, 0xc0, 0xca, 0x8d, 0x01, - 0x01, 0x00, 0x00, 0xff, 0xff, 0x5d, 0xbb, 0xe0, 0x69, 0x19, 0x01, 0x00, 0x00, + GoTypes: file_internal_plugin_grpc_stdio_proto_goTypes, + DependencyIndexes: file_internal_plugin_grpc_stdio_proto_depIdxs, + EnumInfos: file_internal_plugin_grpc_stdio_proto_enumTypes, + MessageInfos: file_internal_plugin_grpc_stdio_proto_msgTypes, + }.Build() + File_internal_plugin_grpc_stdio_proto = out.File + file_internal_plugin_grpc_stdio_proto_rawDesc = nil + file_internal_plugin_grpc_stdio_proto_goTypes = nil + file_internal_plugin_grpc_stdio_proto_depIdxs = nil } diff --git 
a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto index 1c0d1d0526a..f48ac76c978 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio.proto @@ -3,7 +3,7 @@ syntax = "proto3"; package plugin; -option go_package = "plugin"; +option go_package = "./plugin"; import "google/protobuf/empty.proto"; diff --git a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio_grpc.pb.go b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio_grpc.pb.go new file mode 100644 index 00000000000..f82b1503502 --- /dev/null +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/internal/plugin/grpc_stdio_grpc.pb.go @@ -0,0 +1,148 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: internal/plugin/grpc_stdio.proto + +package plugin + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +const ( + GRPCStdio_StreamStdio_FullMethodName = "/plugin.GRPCStdio/StreamStdio" +) + +// GRPCStdioClient is the client API for GRPCStdio service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type GRPCStdioClient interface { + // StreamStdio returns a stream that contains all the stdout/stderr. + // This RPC endpoint must only be called ONCE. Once stdio data is consumed + // it is not sent again. + // + // Callers should connect early to prevent blocking on the plugin process. + StreamStdio(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) +} + +type gRPCStdioClient struct { + cc grpc.ClientConnInterface +} + +func NewGRPCStdioClient(cc grpc.ClientConnInterface) GRPCStdioClient { + return &gRPCStdioClient{cc} +} + +func (c *gRPCStdioClient) StreamStdio(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (GRPCStdio_StreamStdioClient, error) { + stream, err := c.cc.NewStream(ctx, &GRPCStdio_ServiceDesc.Streams[0], GRPCStdio_StreamStdio_FullMethodName, opts...) + if err != nil { + return nil, err + } + x := &gRPCStdioStreamStdioClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type GRPCStdio_StreamStdioClient interface { + Recv() (*StdioData, error) + grpc.ClientStream +} + +type gRPCStdioStreamStdioClient struct { + grpc.ClientStream +} + +func (x *gRPCStdioStreamStdioClient) Recv() (*StdioData, error) { + m := new(StdioData) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// GRPCStdioServer is the server API for GRPCStdio service. +// All implementations should embed UnimplementedGRPCStdioServer +// for forward compatibility +type GRPCStdioServer interface { + // StreamStdio returns a stream that contains all the stdout/stderr. + // This RPC endpoint must only be called ONCE. Once stdio data is consumed + // it is not sent again. + // + // Callers should connect early to prevent blocking on the plugin process. 
+ StreamStdio(*emptypb.Empty, GRPCStdio_StreamStdioServer) error +} + +// UnimplementedGRPCStdioServer should be embedded to have forward compatible implementations. +type UnimplementedGRPCStdioServer struct { +} + +func (UnimplementedGRPCStdioServer) StreamStdio(*emptypb.Empty, GRPCStdio_StreamStdioServer) error { + return status.Errorf(codes.Unimplemented, "method StreamStdio not implemented") +} + +// UnsafeGRPCStdioServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to GRPCStdioServer will +// result in compilation errors. +type UnsafeGRPCStdioServer interface { + mustEmbedUnimplementedGRPCStdioServer() +} + +func RegisterGRPCStdioServer(s grpc.ServiceRegistrar, srv GRPCStdioServer) { + s.RegisterService(&GRPCStdio_ServiceDesc, srv) +} + +func _GRPCStdio_StreamStdio_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(emptypb.Empty) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(GRPCStdioServer).StreamStdio(m, &gRPCStdioStreamStdioServer{stream}) +} + +type GRPCStdio_StreamStdioServer interface { + Send(*StdioData) error + grpc.ServerStream +} + +type gRPCStdioStreamStdioServer struct { + grpc.ServerStream +} + +func (x *gRPCStdioStreamStdioServer) Send(m *StdioData) error { + return x.ServerStream.SendMsg(m) +} + +// GRPCStdio_ServiceDesc is the grpc.ServiceDesc for GRPCStdio service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var GRPCStdio_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "plugin.GRPCStdio", + HandlerType: (*GRPCStdioServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamStdio", + Handler: _GRPCStdio_StreamStdio_Handler, + ServerStreams: true, + }, + }, + Metadata: "internal/plugin/grpc_stdio.proto", +} diff --git a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/server.go b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/server.go index 4b0f2b76919..e741bc7fa18 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/server.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/server.go @@ -21,6 +21,7 @@ import ( "strings" hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin/internal/grpcmux" "google.golang.org/grpc" ) @@ -134,6 +135,13 @@ type ServeTestConfig struct { SyncStdio bool } +func unixSocketConfigFromEnv() UnixSocketConfig { + return UnixSocketConfig{ + Group: os.Getenv(EnvUnixSocketGroup), + socketDir: os.Getenv(EnvUnixSocketDir), + } +} + // protocolVersion determines the protocol version and plugin set to be used by // the server. In the event that there is no suitable version, the last version // in the config is returned leaving the client to report the incompatibility. @@ -380,6 +388,12 @@ func Serve(opts *ServeConfig) { } case ProtocolGRPC: + var muxer *grpcmux.GRPCServerMuxer + if multiplex, _ := strconv.ParseBool(os.Getenv(envMultiplexGRPC)); multiplex { + muxer = grpcmux.NewGRPCServerMuxer(logger, listener) + listener = muxer + } + // Create the gRPC server server = &GRPCServer{ Plugins: pluginSet, @@ -389,6 +403,7 @@ func Serve(opts *ServeConfig) { Stderr: stderr_r, DoneCh: doneCh, logger: logger, + muxer: muxer, } default: @@ -407,13 +422,27 @@ func Serve(opts *ServeConfig) { // bring it up. 
In test mode, we don't do this because clients will // attach via a reattach config. if opts.Test == nil { - fmt.Printf("%d|%d|%s|%s|%s|%s\n", + const grpcBrokerMultiplexingSupported = true + protocolLine := fmt.Sprintf("%d|%d|%s|%s|%s|%s", CoreProtocolVersion, protoVersion, listener.Addr().Network(), listener.Addr().String(), protoType, serverCert) + + // Old clients will error with new plugins if we blindly append the + // seventh segment for gRPC broker multiplexing support, because old + // client code uses strings.SplitN(line, "|", 6), which means a seventh + // segment will get appended to the sixth segment as "sixthpart|true". + // + // If the environment variable is set, we assume the client is new enough + // to handle a seventh segment, as it should now use + // strings.Split(line, "|") and always handle each segment individually. + if os.Getenv(envMultiplexGRPC) != "" { + protocolLine += fmt.Sprintf("|%v", grpcBrokerMultiplexingSupported) + } + fmt.Printf("%s\n", protocolLine) os.Stdout.Sync() } else if ch := opts.Test.ReattachConfigCh; ch != nil { // Send back the reattach config that can be used. This isn't @@ -547,7 +576,7 @@ func serverListener_tcp() (net.Listener, error) { } func serverListener_unix(unixSocketCfg UnixSocketConfig) (net.Listener, error) { - tf, err := os.CreateTemp(unixSocketCfg.directory, "plugin") + tf, err := os.CreateTemp(unixSocketCfg.socketDir, "plugin") if err != nil { return nil, err } @@ -578,10 +607,7 @@ func serverListener_unix(unixSocketCfg UnixSocketConfig) (net.Listener, error) { // Wrap the listener in rmListener so that the Unix domain socket file // is removed on close. 
- return &rmListener{ - Listener: l, - Path: path, - }, nil + return newDeleteFileListener(l, path), nil } func setGroupWritable(path, groupString string, mode os.FileMode) error { @@ -611,11 +637,21 @@ func setGroupWritable(path, groupString string, mode os.FileMode) error { } // rmListener is an implementation of net.Listener that forwards most -// calls to the listener but also removes a file as part of the close. We -// use this to cleanup the unix domain socket on close. +// calls to the listener but also calls an additional close function. We +// use this to cleanup the unix domain socket on close, as well as clean +// up multiplexed listeners. type rmListener struct { net.Listener - Path string + close func() error +} + +func newDeleteFileListener(ln net.Listener, path string) *rmListener { + return &rmListener{ + Listener: ln, + close: func() error { + return os.Remove(path) + }, + } } func (l *rmListener) Close() error { @@ -625,5 +661,5 @@ func (l *rmListener) Close() error { } // Remove the file - return os.Remove(l.Path) + return l.close() } diff --git a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/testing.go b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/testing.go index ae48b7a37eb..a8735dfc8c7 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/testing.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/go-plugin/testing.go @@ -11,7 +11,7 @@ import ( "net/rpc" hclog "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-plugin/internal/plugin" + "github.com/hashicorp/go-plugin/internal/grpcmux" "github.com/mitchellh/go-testing-interface" "google.golang.org/grpc" ) @@ -135,49 +135,51 @@ func TestGRPCConn(t testing.T, register func(*grpc.Server)) (*grpc.ClientConn, * // TestPluginGRPCConn returns a plugin gRPC client and server that are connected // together and configured. This is used to test gRPC connections. 
-func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCServer) { +func TestPluginGRPCConn(t testing.T, multiplex bool, ps map[string]Plugin) (*GRPCClient, *GRPCServer) { // Create a listener - l, err := net.Listen("tcp", "127.0.0.1:0") + ln, err := serverListener(UnixSocketConfig{}) if err != nil { - t.Fatalf("err: %s", err) + t.Fatal(err) } + logger := hclog.New(&hclog.LoggerOptions{ + Level: hclog.Debug, + }) + // Start up the server + var muxer *grpcmux.GRPCServerMuxer + if multiplex { + muxer = grpcmux.NewGRPCServerMuxer(logger, ln) + ln = muxer + } server := &GRPCServer{ Plugins: ps, DoneCh: make(chan struct{}), Server: DefaultGRPCServer, Stdout: new(bytes.Buffer), Stderr: new(bytes.Buffer), - logger: hclog.Default(), + logger: logger, + muxer: muxer, } if err := server.Init(); err != nil { t.Fatalf("err: %s", err) } - go server.Serve(l) - - // Connect to the server - conn, err := grpc.Dial( - l.Addr().String(), - grpc.WithBlock(), - grpc.WithInsecure()) - if err != nil { - t.Fatalf("err: %s", err) + go server.Serve(ln) + + client := &Client{ + address: ln.Addr(), + protocol: ProtocolGRPC, + config: &ClientConfig{ + Plugins: ps, + GRPCBrokerMultiplex: multiplex, + }, + logger: logger, } - brokerGRPCClient := newGRPCBrokerClient(conn) - broker := newGRPCBroker(brokerGRPCClient, nil, UnixSocketConfig{}, nil) - go broker.Run() - go brokerGRPCClient.StartStream() - - // Create the client - client := &GRPCClient{ - Conn: conn, - Plugins: ps, - broker: broker, - doneCtx: context.Background(), - controller: plugin.NewGRPCControllerClient(conn), + grpcClient, err := newGRPCClient(context.Background(), client) + if err != nil { + t.Fatal(err) } - return client, server + return grpcClient, server } diff --git a/.ci/providerlint/vendor/github.com/hashicorp/hc-install/.go-version b/.ci/providerlint/vendor/github.com/hashicorp/hc-install/.go-version index 95393fc7d4d..20a1265cf39 100644 --- 
a/.ci/providerlint/vendor/github.com/hashicorp/hc-install/.go-version +++ b/.ci/providerlint/vendor/github.com/hashicorp/hc-install/.go-version @@ -1 +1 @@ -1.20.8 +1.21.4 diff --git a/.ci/providerlint/vendor/github.com/hashicorp/hc-install/version/VERSION b/.ci/providerlint/vendor/github.com/hashicorp/hc-install/version/VERSION index ee6cdce3c29..b6160487433 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/hc-install/version/VERSION +++ b/.ci/providerlint/vendor/github.com/hashicorp/hc-install/version/VERSION @@ -1 +1 @@ -0.6.1 +0.6.2 diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-json/README.md b/.ci/providerlint/vendor/github.com/hashicorp/terraform-json/README.md index 4a9cd94a119..462c1a819d3 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/terraform-json/README.md +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-json/README.md @@ -15,7 +15,39 @@ This repository also serves as de facto documentation for the formats produced by these commands. For more details, see the [GoDoc](https://godoc.org/github.com/hashicorp/terraform-json). -## Why a Separate Repository? +## Should I use this library? + +This library was built for a few specific applications, and is not intended for +general purpose use. + +The Terraform core team **recommends against** using `terraform-json` if your +application has any of the following requirements: + +* **Forward-compatibility**: each version of this library represents a specific + snapshot of the [Terraform JSON output format](https://developer.hashicorp.com/terraform/internals/json-format), + and it often slightly lags behind Terraform itself. The library supports + [the 1.x compatibility promises](https://developer.hashicorp.com/terraform/language/v1-compatibility-promises) + but you will need to upgrade the version promptly to use new additions. 
If you + require full compatibility with future Terraform versions, we recommend + implementing your own custom decoders for the parts of the JSON format you need. +* **Writing JSON output**: the structures in this library are not guaranteed to emit + JSON data which is semantically equivalent to Terraform itself. If your application + must robustly write JSON data to be consumed by systems which expect Terraform's + format to be supported, you should implement your own custom encoders. +* **Filtering or round-tripping**: the Terraform JSON formats are designed to be + forwards compatible, and permit new attributes to be added which may safely be + ignored by earlier versions of consumers. This library **drops unknown attributes**, + which means it is unsuitable for any application which intends to filter data + or read-modify-write data which will be consumed downstream. Any application doing + this will silently drop new data from new versions. For this application, you should + implement a custom decoder and encoder which preserves any unknown attributes + through a round-trip. + +When is `terraform-json` suitable? We recommend using it for applications which +decode the core stable data types and use it directly, and don't attempt to emit +JSON to be consumed by applications which expect the Terraform format. + +## Why a separate repository? To reduce dependencies on any of Terraform core's internals, we've made a design decision to make any helpers or libraries that work with the external JSON data diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-json/plan.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-json/plan.go index de529accc05..b6583c08b05 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/terraform-json/plan.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-json/plan.go @@ -144,6 +144,10 @@ type ResourceChange struct { // The absolute resource address. 
Address string `json:"address,omitempty"` + // The absolute address that this resource instance had + // at the conclusion of a previous plan. + PreviousAddress string `json:"previous_address,omitempty"` + // The module portion of the above address. Omitted if the instance // is in the root module. ModuleAddress string `json:"module_address,omitempty"` diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-json/state.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-json/state.go index 0f2a9996966..e5336329b84 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/terraform-json/state.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-json/state.go @@ -38,7 +38,7 @@ type State struct { // Checks contains the results of any conditional checks when Values was // last updated. - Checks *CheckResultStatic `json:"checks,omitempty"` + Checks []CheckResultStatic `json:"checks,omitempty"` } // UseJSONNumber controls whether the State will be decoded using the diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/keys.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/keys.go index 2cfacba63e4..b33be9ef6a1 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/keys.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/internal/logging/keys.go @@ -11,6 +11,9 @@ const ( // Attribute of the diagnostic being logged. KeyDiagnosticAttribute = "diagnostic_attribute" + // Function Argument of the diagnostic being logged. + KeyDiagnosticFunctionArgument = "diagnostic_function_argument" + // Number of the error diagnostics. 
KeyDiagnosticErrorCount = "diagnostic_error_count" diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/diagnostic.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/diagnostic.go index 15ab6a4ab13..5edb8b9becc 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/diagnostic.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/diagnostic.go @@ -42,6 +42,10 @@ type Diagnostic struct { // indicate that the problem is with a certain field in the resource, // which helps users find the source of the problem. Attribute *tftypes.AttributePath + + // FunctionArgument is the positional function argument for aligning + // configuration source. + FunctionArgument *int64 } // DiagnosticSeverity represents different classes of Diagnostic which affect diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/function.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/function.go new file mode 100644 index 00000000000..30fda0ab3aa --- /dev/null +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/function.go @@ -0,0 +1,141 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov5 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Function describes the definition of a function. Result must be defined. +type Function struct { + // Parameters is the ordered list of positional function parameters. + Parameters []*FunctionParameter + + // VariadicParameter is an optional final parameter which accepts zero or + // more argument values, in which Terraform will send an ordered list of the + // parameter type. + VariadicParameter *FunctionParameter + + // Return is the function result. + Return *FunctionReturn + + // Summary is the shortened human-readable documentation for the function. 
+ Summary string + + // Description is the longer human-readable documentation for the function. + Description string + + // DescriptionKind indicates the formatting and encoding that the + // Description field is using. + DescriptionKind StringKind + + // DeprecationMessage is the human-readable documentation if the function + // is deprecated. This message should be practitioner oriented to explain + // how their configuration should be updated. + DeprecationMessage string +} + +// FunctionMetadata describes metadata for a function in the GetMetadata RPC. +type FunctionMetadata struct { + // Name is the name of the function. + Name string +} + +// FunctionParameter describes the definition of a function parameter. Type must +// be defined. +type FunctionParameter struct { + // AllowNullValue when enabled denotes that a null argument value can be + // passed to the provider. When disabled, Terraform returns an error if the + // argument value is null. + AllowNullValue bool + + // AllowUnknownValues when enabled denotes that any unknown argument value + // (recursively checked for collections) can be passed to the provider. When + // disabled and an unknown value is present, Terraform skips the function + // call entirely and returns an unknown value result from the function. + AllowUnknownValues bool + + // Description is the human-readable documentation for the parameter. + Description string + + // DescriptionKind indicates the formatting and encoding that the + // Description field is using. + DescriptionKind StringKind + + // Name is the human-readable display name for the parameter. Parameters + // are by definition positional and this name is only used in documentation. + Name string + + // Type indicates the type of data the parameter expects. + Type tftypes.Type +} + +// FunctionReturn describes the definition of a function result. Type must be +// defined. +type FunctionReturn struct { + // Type indicates the type of return data. 
+ Type tftypes.Type +} + +// FunctionServer is an interface containing the methods a function +// implementation needs to fill. +type FunctionServer interface { + // CallFunction is called when Terraform wants to execute the logic of a + // function referenced in the configuration. + CallFunction(context.Context, *CallFunctionRequest) (*CallFunctionResponse, error) + + // GetFunctions is called when Terraform wants to lookup which functions a + // provider supports when not calling GetProviderSchema. + GetFunctions(context.Context, *GetFunctionsRequest) (*GetFunctionsResponse, error) +} + +// CallFunctionRequest is the request Terraform sends when it wants to execute +// the logic of function referenced in the configuration. +type CallFunctionRequest struct { + // Name is the function name being called. + Name string + + // Arguments is the configuration value of each argument the practitioner + // supplied for the function call. The ordering and value of each element + // matches the function parameters and their associated type. If the + // function definition includes a final variadic parameter, its value is an + // ordered list of the variadic parameter type. + Arguments []*DynamicValue +} + +// CallFunctionResponse is the response from the provider with the result of +// executing the logic of the function. +type CallFunctionResponse struct { + // Diagnostics report errors or warnings related to the execution of the + // function logic. Returning an empty slice indicates a successful response + // with no warnings or errors presented to practitioners. + Diagnostics []*Diagnostic + + // Result is the return value from the called function, matching the result + // type in the function definition. + Result *DynamicValue +} + +// GetFunctionsRequest is the request Terraform sends when it wants to lookup +// which functions a provider supports when not calling GetProviderSchema. 
+type GetFunctionsRequest struct{} + +// GetFunctionsResponse is the response from the provider about the implemented +// functions. +type GetFunctionsResponse struct { + // Diagnostics report errors or warnings related to the provider + // implementation. Returning an empty slice indicates a successful response + // with no warnings or errors presented to practitioners. + Diagnostics []*Diagnostic + + // Functions is a map of function names to their definition. + // + // Unlike data resources and managed resources, the name should NOT be + // prefixed with the provider name and an underscore. Configuration + // references to functions use a separate namespacing syntax that already + // includes the provider name. + Functions map[string]*Function +} diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/diagnostics.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/diagnostics.go index cc40d861fce..25703b4ce69 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/diagnostics.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/diag/diagnostics.go @@ -53,6 +53,10 @@ func (d Diagnostics) Log(ctx context.Context) { diagnosticFields[logging.KeyDiagnosticAttribute] = diagnostic.Attribute.String() } + if diagnostic.FunctionArgument != nil { + diagnosticFields[logging.KeyDiagnosticFunctionArgument] = *diagnostic.FunctionArgument + } + switch diagnostic.Severity { case tfprotov5.DiagnosticSeverityError: logging.ProtocolError(ctx, "Response contains error diagnostic", diagnosticFields) diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/function.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/function.go new file mode 100644 index 00000000000..29ff0fb8910 --- /dev/null +++ 
b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/fromproto/function.go @@ -0,0 +1,36 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package fromproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func CallFunctionRequest(in *tfplugin5.CallFunction_Request) (*tfprotov5.CallFunctionRequest, error) { + if in == nil { + return nil, nil + } + + resp := &tfprotov5.CallFunctionRequest{ + Arguments: make([]*tfprotov5.DynamicValue, 0, len(in.Arguments)), + Name: in.Name, + } + + for _, argument := range in.Arguments { + resp.Arguments = append(resp.Arguments, DynamicValue(argument)) + } + + return resp, nil +} + +func GetFunctionsRequest(in *tfplugin5.GetFunctions_Request) (*tfprotov5.GetFunctionsRequest, error) { + if in == nil { + return nil, nil + } + + resp := &tfprotov5.GetFunctionsRequest{} + + return resp, nil +} diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go index b00fa1be15f..9310dfbf2e4 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.pb.go @@ -1,9 +1,9 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -// Terraform Plugin RPC protocol version 5.4 +// Terraform Plugin RPC protocol version 5.5 // -// This file defines version 5.4 of the RPC protocol. To implement a plugin +// This file defines version 5.5 of the RPC protocol. To implement a plugin // against this protocol, copy this definition into your own codebase and // use protoc to generate stubs for your target language. 
// @@ -261,6 +261,9 @@ type Diagnostic struct { Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` Detail string `protobuf:"bytes,3,opt,name=detail,proto3" json:"detail,omitempty"` Attribute *AttributePath `protobuf:"bytes,4,opt,name=attribute,proto3" json:"attribute,omitempty"` + // function_argument is the positional function argument for aligning + // configuration source. + FunctionArgument *int64 `protobuf:"varint,5,opt,name=function_argument,json=functionArgument,proto3,oneof" json:"function_argument,omitempty"` } func (x *Diagnostic) Reset() { @@ -323,6 +326,13 @@ func (x *Diagnostic) GetAttribute() *AttributePath { return nil } +func (x *Diagnostic) GetFunctionArgument() int64 { + if x != nil && x.FunctionArgument != nil { + return *x.FunctionArgument + } + return 0 +} + type AttributePath struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -591,6 +601,111 @@ func (x *ServerCapabilities) GetGetProviderSchemaOptional() bool { return false } +type Function struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // parameters is the ordered list of positional function parameters. + Parameters []*Function_Parameter `protobuf:"bytes,1,rep,name=parameters,proto3" json:"parameters,omitempty"` + // variadic_parameter is an optional final parameter which accepts + // zero or more argument values, in which Terraform will send an + // ordered list of the parameter type. + VariadicParameter *Function_Parameter `protobuf:"bytes,2,opt,name=variadic_parameter,json=variadicParameter,proto3" json:"variadic_parameter,omitempty"` + // return is the function result. + Return *Function_Return `protobuf:"bytes,3,opt,name=return,proto3" json:"return,omitempty"` + // summary is the human-readable shortened documentation for the function. 
+ Summary string `protobuf:"bytes,4,opt,name=summary,proto3" json:"summary,omitempty"` + // description is human-readable documentation for the function. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // description_kind is the formatting of the description. + DescriptionKind StringKind `protobuf:"varint,6,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin5.StringKind" json:"description_kind,omitempty"` + // deprecation_message is human-readable documentation if the + // function is deprecated. + DeprecationMessage string `protobuf:"bytes,7,opt,name=deprecation_message,json=deprecationMessage,proto3" json:"deprecation_message,omitempty"` +} + +func (x *Function) Reset() { + *x = Function{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Function) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Function) ProtoMessage() {} + +func (x *Function) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Function.ProtoReflect.Descriptor instead. 
+func (*Function) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{7} +} + +func (x *Function) GetParameters() []*Function_Parameter { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *Function) GetVariadicParameter() *Function_Parameter { + if x != nil { + return x.VariadicParameter + } + return nil +} + +func (x *Function) GetReturn() *Function_Return { + if x != nil { + return x.Return + } + return nil +} + +func (x *Function) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *Function) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Function) GetDescriptionKind() StringKind { + if x != nil { + return x.DescriptionKind + } + return StringKind_PLAIN +} + +func (x *Function) GetDeprecationMessage() string { + if x != nil { + return x.DeprecationMessage + } + return "" +} + type GetMetadata struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -600,7 +715,7 @@ type GetMetadata struct { func (x *GetMetadata) Reset() { *x = GetMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[7] + mi := &file_tfplugin5_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -613,7 +728,7 @@ func (x *GetMetadata) String() string { func (*GetMetadata) ProtoMessage() {} func (x *GetMetadata) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[7] + mi := &file_tfplugin5_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -626,7 +741,7 @@ func (x *GetMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMetadata.ProtoReflect.Descriptor instead. 
func (*GetMetadata) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{7} + return file_tfplugin5_proto_rawDescGZIP(), []int{8} } type GetProviderSchema struct { @@ -638,7 +753,7 @@ type GetProviderSchema struct { func (x *GetProviderSchema) Reset() { *x = GetProviderSchema{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[8] + mi := &file_tfplugin5_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -651,7 +766,7 @@ func (x *GetProviderSchema) String() string { func (*GetProviderSchema) ProtoMessage() {} func (x *GetProviderSchema) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[8] + mi := &file_tfplugin5_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -664,7 +779,7 @@ func (x *GetProviderSchema) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProviderSchema.ProtoReflect.Descriptor instead. 
func (*GetProviderSchema) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{8} + return file_tfplugin5_proto_rawDescGZIP(), []int{9} } type PrepareProviderConfig struct { @@ -676,7 +791,7 @@ type PrepareProviderConfig struct { func (x *PrepareProviderConfig) Reset() { *x = PrepareProviderConfig{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[9] + mi := &file_tfplugin5_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -689,7 +804,7 @@ func (x *PrepareProviderConfig) String() string { func (*PrepareProviderConfig) ProtoMessage() {} func (x *PrepareProviderConfig) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[9] + mi := &file_tfplugin5_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -702,7 +817,7 @@ func (x *PrepareProviderConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use PrepareProviderConfig.ProtoReflect.Descriptor instead. 
func (*PrepareProviderConfig) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{9} + return file_tfplugin5_proto_rawDescGZIP(), []int{10} } type UpgradeResourceState struct { @@ -714,7 +829,7 @@ type UpgradeResourceState struct { func (x *UpgradeResourceState) Reset() { *x = UpgradeResourceState{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[10] + mi := &file_tfplugin5_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -727,7 +842,7 @@ func (x *UpgradeResourceState) String() string { func (*UpgradeResourceState) ProtoMessage() {} func (x *UpgradeResourceState) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[10] + mi := &file_tfplugin5_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -740,7 +855,7 @@ func (x *UpgradeResourceState) ProtoReflect() protoreflect.Message { // Deprecated: Use UpgradeResourceState.ProtoReflect.Descriptor instead. 
func (*UpgradeResourceState) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{10} + return file_tfplugin5_proto_rawDescGZIP(), []int{11} } type ValidateResourceTypeConfig struct { @@ -752,7 +867,7 @@ type ValidateResourceTypeConfig struct { func (x *ValidateResourceTypeConfig) Reset() { *x = ValidateResourceTypeConfig{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[11] + mi := &file_tfplugin5_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -765,7 +880,7 @@ func (x *ValidateResourceTypeConfig) String() string { func (*ValidateResourceTypeConfig) ProtoMessage() {} func (x *ValidateResourceTypeConfig) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[11] + mi := &file_tfplugin5_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -778,7 +893,7 @@ func (x *ValidateResourceTypeConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateResourceTypeConfig.ProtoReflect.Descriptor instead. 
func (*ValidateResourceTypeConfig) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{11} + return file_tfplugin5_proto_rawDescGZIP(), []int{12} } type ValidateDataSourceConfig struct { @@ -790,7 +905,7 @@ type ValidateDataSourceConfig struct { func (x *ValidateDataSourceConfig) Reset() { *x = ValidateDataSourceConfig{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[12] + mi := &file_tfplugin5_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -803,7 +918,7 @@ func (x *ValidateDataSourceConfig) String() string { func (*ValidateDataSourceConfig) ProtoMessage() {} func (x *ValidateDataSourceConfig) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[12] + mi := &file_tfplugin5_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -816,7 +931,7 @@ func (x *ValidateDataSourceConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateDataSourceConfig.ProtoReflect.Descriptor instead. 
func (*ValidateDataSourceConfig) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{12} + return file_tfplugin5_proto_rawDescGZIP(), []int{13} } type Configure struct { @@ -828,7 +943,7 @@ type Configure struct { func (x *Configure) Reset() { *x = Configure{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[13] + mi := &file_tfplugin5_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -841,7 +956,7 @@ func (x *Configure) String() string { func (*Configure) ProtoMessage() {} func (x *Configure) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[13] + mi := &file_tfplugin5_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -854,7 +969,7 @@ func (x *Configure) ProtoReflect() protoreflect.Message { // Deprecated: Use Configure.ProtoReflect.Descriptor instead. func (*Configure) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{13} + return file_tfplugin5_proto_rawDescGZIP(), []int{14} } type ReadResource struct { @@ -866,7 +981,7 @@ type ReadResource struct { func (x *ReadResource) Reset() { *x = ReadResource{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[14] + mi := &file_tfplugin5_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -879,7 +994,7 @@ func (x *ReadResource) String() string { func (*ReadResource) ProtoMessage() {} func (x *ReadResource) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[14] + mi := &file_tfplugin5_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -892,7 +1007,7 @@ func (x *ReadResource) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadResource.ProtoReflect.Descriptor 
instead. func (*ReadResource) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{14} + return file_tfplugin5_proto_rawDescGZIP(), []int{15} } type PlanResourceChange struct { @@ -904,7 +1019,7 @@ type PlanResourceChange struct { func (x *PlanResourceChange) Reset() { *x = PlanResourceChange{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[15] + mi := &file_tfplugin5_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -917,7 +1032,7 @@ func (x *PlanResourceChange) String() string { func (*PlanResourceChange) ProtoMessage() {} func (x *PlanResourceChange) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[15] + mi := &file_tfplugin5_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -930,7 +1045,7 @@ func (x *PlanResourceChange) ProtoReflect() protoreflect.Message { // Deprecated: Use PlanResourceChange.ProtoReflect.Descriptor instead. 
func (*PlanResourceChange) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{15} + return file_tfplugin5_proto_rawDescGZIP(), []int{16} } type ApplyResourceChange struct { @@ -942,7 +1057,7 @@ type ApplyResourceChange struct { func (x *ApplyResourceChange) Reset() { *x = ApplyResourceChange{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[16] + mi := &file_tfplugin5_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -955,7 +1070,7 @@ func (x *ApplyResourceChange) String() string { func (*ApplyResourceChange) ProtoMessage() {} func (x *ApplyResourceChange) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[16] + mi := &file_tfplugin5_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -968,7 +1083,7 @@ func (x *ApplyResourceChange) ProtoReflect() protoreflect.Message { // Deprecated: Use ApplyResourceChange.ProtoReflect.Descriptor instead. 
func (*ApplyResourceChange) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{16} + return file_tfplugin5_proto_rawDescGZIP(), []int{17} } type ImportResourceState struct { @@ -980,7 +1095,7 @@ type ImportResourceState struct { func (x *ImportResourceState) Reset() { *x = ImportResourceState{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[17] + mi := &file_tfplugin5_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -993,7 +1108,7 @@ func (x *ImportResourceState) String() string { func (*ImportResourceState) ProtoMessage() {} func (x *ImportResourceState) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[17] + mi := &file_tfplugin5_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1006,7 +1121,7 @@ func (x *ImportResourceState) ProtoReflect() protoreflect.Message { // Deprecated: Use ImportResourceState.ProtoReflect.Descriptor instead. 
func (*ImportResourceState) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{17} + return file_tfplugin5_proto_rawDescGZIP(), []int{18} } type ReadDataSource struct { @@ -1018,7 +1133,7 @@ type ReadDataSource struct { func (x *ReadDataSource) Reset() { *x = ReadDataSource{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[18] + mi := &file_tfplugin5_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1031,7 +1146,7 @@ func (x *ReadDataSource) String() string { func (*ReadDataSource) ProtoMessage() {} func (x *ReadDataSource) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[18] + mi := &file_tfplugin5_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1044,7 +1159,7 @@ func (x *ReadDataSource) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadDataSource.ProtoReflect.Descriptor instead. 
func (*ReadDataSource) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{18} + return file_tfplugin5_proto_rawDescGZIP(), []int{19} } type GetProvisionerSchema struct { @@ -1056,7 +1171,7 @@ type GetProvisionerSchema struct { func (x *GetProvisionerSchema) Reset() { *x = GetProvisionerSchema{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[19] + mi := &file_tfplugin5_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1069,7 +1184,7 @@ func (x *GetProvisionerSchema) String() string { func (*GetProvisionerSchema) ProtoMessage() {} func (x *GetProvisionerSchema) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[19] + mi := &file_tfplugin5_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1082,7 +1197,7 @@ func (x *GetProvisionerSchema) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProvisionerSchema.ProtoReflect.Descriptor instead. 
func (*GetProvisionerSchema) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{19} + return file_tfplugin5_proto_rawDescGZIP(), []int{20} } type ValidateProvisionerConfig struct { @@ -1094,7 +1209,7 @@ type ValidateProvisionerConfig struct { func (x *ValidateProvisionerConfig) Reset() { *x = ValidateProvisionerConfig{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[20] + mi := &file_tfplugin5_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1107,7 +1222,7 @@ func (x *ValidateProvisionerConfig) String() string { func (*ValidateProvisionerConfig) ProtoMessage() {} func (x *ValidateProvisionerConfig) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[20] + mi := &file_tfplugin5_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1120,7 +1235,7 @@ func (x *ValidateProvisionerConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateProvisionerConfig.ProtoReflect.Descriptor instead. 
func (*ValidateProvisionerConfig) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{20} + return file_tfplugin5_proto_rawDescGZIP(), []int{21} } type ProvisionResource struct { @@ -1132,7 +1247,7 @@ type ProvisionResource struct { func (x *ProvisionResource) Reset() { *x = ProvisionResource{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[21] + mi := &file_tfplugin5_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1145,7 +1260,7 @@ func (x *ProvisionResource) String() string { func (*ProvisionResource) ProtoMessage() {} func (x *ProvisionResource) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[21] + mi := &file_tfplugin5_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1158,7 +1273,83 @@ func (x *ProvisionResource) ProtoReflect() protoreflect.Message { // Deprecated: Use ProvisionResource.ProtoReflect.Descriptor instead. 
func (*ProvisionResource) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{21} + return file_tfplugin5_proto_rawDescGZIP(), []int{22} +} + +type GetFunctions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetFunctions) Reset() { + *x = GetFunctions{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFunctions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFunctions) ProtoMessage() {} + +func (x *GetFunctions) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFunctions.ProtoReflect.Descriptor instead. 
+func (*GetFunctions) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{23} +} + +type CallFunction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CallFunction) Reset() { + *x = CallFunction{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CallFunction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CallFunction) ProtoMessage() {} + +func (x *CallFunction) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CallFunction.ProtoReflect.Descriptor instead. +func (*CallFunction) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{24} } type AttributePath_Step struct { @@ -1177,7 +1368,7 @@ type AttributePath_Step struct { func (x *AttributePath_Step) Reset() { *x = AttributePath_Step{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[22] + mi := &file_tfplugin5_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1190,7 +1381,7 @@ func (x *AttributePath_Step) String() string { func (*AttributePath_Step) ProtoMessage() {} func (x *AttributePath_Step) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[22] + mi := &file_tfplugin5_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1269,7 +1460,7 @@ type Stop_Request struct { func (x *Stop_Request) Reset() { *x = Stop_Request{} if protoimpl.UnsafeEnabled { - mi := 
&file_tfplugin5_proto_msgTypes[23] + mi := &file_tfplugin5_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1282,7 +1473,7 @@ func (x *Stop_Request) String() string { func (*Stop_Request) ProtoMessage() {} func (x *Stop_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[23] + mi := &file_tfplugin5_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1309,7 +1500,7 @@ type Stop_Response struct { func (x *Stop_Response) Reset() { *x = Stop_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[24] + mi := &file_tfplugin5_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1322,7 +1513,7 @@ func (x *Stop_Response) String() string { func (*Stop_Response) ProtoMessage() {} func (x *Stop_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[24] + mi := &file_tfplugin5_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1361,7 +1552,7 @@ type Schema_Block struct { func (x *Schema_Block) Reset() { *x = Schema_Block{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[26] + mi := &file_tfplugin5_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1374,7 +1565,7 @@ func (x *Schema_Block) String() string { func (*Schema_Block) ProtoMessage() {} func (x *Schema_Block) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[26] + mi := &file_tfplugin5_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1451,7 +1642,7 @@ type Schema_Attribute struct { func (x *Schema_Attribute) Reset() { *x = 
Schema_Attribute{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[27] + mi := &file_tfplugin5_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1464,7 +1655,7 @@ func (x *Schema_Attribute) String() string { func (*Schema_Attribute) ProtoMessage() {} func (x *Schema_Attribute) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[27] + mi := &file_tfplugin5_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1558,7 +1749,7 @@ type Schema_NestedBlock struct { func (x *Schema_NestedBlock) Reset() { *x = Schema_NestedBlock{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[28] + mi := &file_tfplugin5_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1571,7 +1762,7 @@ func (x *Schema_NestedBlock) String() string { func (*Schema_NestedBlock) ProtoMessage() {} func (x *Schema_NestedBlock) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[28] + mi := &file_tfplugin5_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1622,29 +1813,47 @@ func (x *Schema_NestedBlock) GetMaxItems() int64 { return 0 } -type GetMetadata_Request struct { +type Function_Parameter struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields -} -func (x *GetMetadata_Request) Reset() { - *x = GetMetadata_Request{} + // name is the human-readable display name for the parameter. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // type is the type constraint for the parameter. 
+ Type []byte `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + // allow_null_value when enabled denotes that a null argument value can + // be passed to the provider. When disabled, Terraform returns an error + // if the argument value is null. + AllowNullValue bool `protobuf:"varint,3,opt,name=allow_null_value,json=allowNullValue,proto3" json:"allow_null_value,omitempty"` + // allow_unknown_values when enabled denotes that only wholly known + // argument values will be passed to the provider. When disabled, + // Terraform skips the function call entirely and assumes an unknown + // value result from the function. + AllowUnknownValues bool `protobuf:"varint,4,opt,name=allow_unknown_values,json=allowUnknownValues,proto3" json:"allow_unknown_values,omitempty"` + // description is human-readable documentation for the parameter. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // description_kind is the formatting of the description. 
+ DescriptionKind StringKind `protobuf:"varint,6,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin5.StringKind" json:"description_kind,omitempty"` +} + +func (x *Function_Parameter) Reset() { + *x = Function_Parameter{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[29] + mi := &file_tfplugin5_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetMetadata_Request) String() string { +func (x *Function_Parameter) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetMetadata_Request) ProtoMessage() {} +func (*Function_Parameter) ProtoMessage() {} -func (x *GetMetadata_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[29] +func (x *Function_Parameter) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1655,26 +1864,156 @@ func (x *GetMetadata_Request) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetMetadata_Request.ProtoReflect.Descriptor instead. -func (*GetMetadata_Request) Descriptor() ([]byte, []int) { +// Deprecated: Use Function_Parameter.ProtoReflect.Descriptor instead. 
+func (*Function_Parameter) Descriptor() ([]byte, []int) { return file_tfplugin5_proto_rawDescGZIP(), []int{7, 0} } -type GetMetadata_Response struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - +func (x *Function_Parameter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Function_Parameter) GetType() []byte { + if x != nil { + return x.Type + } + return nil +} + +func (x *Function_Parameter) GetAllowNullValue() bool { + if x != nil { + return x.AllowNullValue + } + return false +} + +func (x *Function_Parameter) GetAllowUnknownValues() bool { + if x != nil { + return x.AllowUnknownValues + } + return false +} + +func (x *Function_Parameter) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Function_Parameter) GetDescriptionKind() StringKind { + if x != nil { + return x.DescriptionKind + } + return StringKind_PLAIN +} + +type Function_Return struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // type is the type constraint for the function result. 
+ Type []byte `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` +} + +func (x *Function_Return) Reset() { + *x = Function_Return{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Function_Return) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Function_Return) ProtoMessage() {} + +func (x *Function_Return) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Function_Return.ProtoReflect.Descriptor instead. +func (*Function_Return) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{7, 1} +} + +func (x *Function_Return) GetType() []byte { + if x != nil { + return x.Type + } + return nil +} + +type GetMetadata_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetMetadata_Request) Reset() { + *x = GetMetadata_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_Request) ProtoMessage() {} + +func (x *GetMetadata_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_Request.ProtoReflect.Descriptor instead. 
+func (*GetMetadata_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{8, 0} +} + +type GetMetadata_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + ServerCapabilities *ServerCapabilities `protobuf:"bytes,1,opt,name=server_capabilities,json=serverCapabilities,proto3" json:"server_capabilities,omitempty"` Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` DataSources []*GetMetadata_DataSourceMetadata `protobuf:"bytes,3,rep,name=data_sources,json=dataSources,proto3" json:"data_sources,omitempty"` Resources []*GetMetadata_ResourceMetadata `protobuf:"bytes,4,rep,name=resources,proto3" json:"resources,omitempty"` + // functions returns metadata for any functions. + Functions []*GetMetadata_FunctionMetadata `protobuf:"bytes,5,rep,name=functions,proto3" json:"functions,omitempty"` } func (x *GetMetadata_Response) Reset() { *x = GetMetadata_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[30] + mi := &file_tfplugin5_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1687,7 +2026,7 @@ func (x *GetMetadata_Response) String() string { func (*GetMetadata_Response) ProtoMessage() {} func (x *GetMetadata_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[30] + mi := &file_tfplugin5_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1700,7 +2039,7 @@ func (x *GetMetadata_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMetadata_Response.ProtoReflect.Descriptor instead. 
func (*GetMetadata_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{7, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{8, 1} } func (x *GetMetadata_Response) GetServerCapabilities() *ServerCapabilities { @@ -1731,6 +2070,61 @@ func (x *GetMetadata_Response) GetResources() []*GetMetadata_ResourceMetadata { return nil } +func (x *GetMetadata_Response) GetFunctions() []*GetMetadata_FunctionMetadata { + if x != nil { + return x.Functions + } + return nil +} + +type GetMetadata_FunctionMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // name is the function name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetMetadata_FunctionMetadata) Reset() { + *x = GetMetadata_FunctionMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_FunctionMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_FunctionMetadata) ProtoMessage() {} + +func (x *GetMetadata_FunctionMetadata) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_FunctionMetadata.ProtoReflect.Descriptor instead. 
+func (*GetMetadata_FunctionMetadata) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{8, 2} +} + +func (x *GetMetadata_FunctionMetadata) GetName() string { + if x != nil { + return x.Name + } + return "" +} + type GetMetadata_DataSourceMetadata struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1742,7 +2136,7 @@ type GetMetadata_DataSourceMetadata struct { func (x *GetMetadata_DataSourceMetadata) Reset() { *x = GetMetadata_DataSourceMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[31] + mi := &file_tfplugin5_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1755,7 +2149,7 @@ func (x *GetMetadata_DataSourceMetadata) String() string { func (*GetMetadata_DataSourceMetadata) ProtoMessage() {} func (x *GetMetadata_DataSourceMetadata) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[31] + mi := &file_tfplugin5_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1768,7 +2162,7 @@ func (x *GetMetadata_DataSourceMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMetadata_DataSourceMetadata.ProtoReflect.Descriptor instead. 
func (*GetMetadata_DataSourceMetadata) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{7, 2} + return file_tfplugin5_proto_rawDescGZIP(), []int{8, 3} } func (x *GetMetadata_DataSourceMetadata) GetTypeName() string { @@ -1789,7 +2183,7 @@ type GetMetadata_ResourceMetadata struct { func (x *GetMetadata_ResourceMetadata) Reset() { *x = GetMetadata_ResourceMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[32] + mi := &file_tfplugin5_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1802,7 +2196,7 @@ func (x *GetMetadata_ResourceMetadata) String() string { func (*GetMetadata_ResourceMetadata) ProtoMessage() {} func (x *GetMetadata_ResourceMetadata) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[32] + mi := &file_tfplugin5_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1815,7 +2209,7 @@ func (x *GetMetadata_ResourceMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMetadata_ResourceMetadata.ProtoReflect.Descriptor instead. 
func (*GetMetadata_ResourceMetadata) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{7, 3} + return file_tfplugin5_proto_rawDescGZIP(), []int{8, 4} } func (x *GetMetadata_ResourceMetadata) GetTypeName() string { @@ -1834,7 +2228,7 @@ type GetProviderSchema_Request struct { func (x *GetProviderSchema_Request) Reset() { *x = GetProviderSchema_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[33] + mi := &file_tfplugin5_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1847,7 +2241,7 @@ func (x *GetProviderSchema_Request) String() string { func (*GetProviderSchema_Request) ProtoMessage() {} func (x *GetProviderSchema_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[33] + mi := &file_tfplugin5_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1860,7 +2254,7 @@ func (x *GetProviderSchema_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProviderSchema_Request.ProtoReflect.Descriptor instead. func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{8, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{9, 0} } type GetProviderSchema_Response struct { @@ -1874,12 +2268,14 @@ type GetProviderSchema_Response struct { Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` ProviderMeta *Schema `protobuf:"bytes,5,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` ServerCapabilities *ServerCapabilities `protobuf:"bytes,6,opt,name=server_capabilities,json=serverCapabilities,proto3" json:"server_capabilities,omitempty"` + // functions is a mapping of function names to definitions. 
+ Functions map[string]*Function `protobuf:"bytes,7,rep,name=functions,proto3" json:"functions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *GetProviderSchema_Response) Reset() { *x = GetProviderSchema_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[34] + mi := &file_tfplugin5_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1892,7 +2288,7 @@ func (x *GetProviderSchema_Response) String() string { func (*GetProviderSchema_Response) ProtoMessage() {} func (x *GetProviderSchema_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[34] + mi := &file_tfplugin5_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1905,7 +2301,7 @@ func (x *GetProviderSchema_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProviderSchema_Response.ProtoReflect.Descriptor instead. 
func (*GetProviderSchema_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{8, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{9, 1} } func (x *GetProviderSchema_Response) GetProvider() *Schema { @@ -1950,6 +2346,13 @@ func (x *GetProviderSchema_Response) GetServerCapabilities() *ServerCapabilities return nil } +func (x *GetProviderSchema_Response) GetFunctions() map[string]*Function { + if x != nil { + return x.Functions + } + return nil +} + type PrepareProviderConfig_Request struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1961,7 +2364,7 @@ type PrepareProviderConfig_Request struct { func (x *PrepareProviderConfig_Request) Reset() { *x = PrepareProviderConfig_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[37] + mi := &file_tfplugin5_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1974,7 +2377,7 @@ func (x *PrepareProviderConfig_Request) String() string { func (*PrepareProviderConfig_Request) ProtoMessage() {} func (x *PrepareProviderConfig_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[37] + mi := &file_tfplugin5_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1987,7 +2390,7 @@ func (x *PrepareProviderConfig_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use PrepareProviderConfig_Request.ProtoReflect.Descriptor instead. 
func (*PrepareProviderConfig_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{9, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{10, 0} } func (x *PrepareProviderConfig_Request) GetConfig() *DynamicValue { @@ -2009,7 +2412,7 @@ type PrepareProviderConfig_Response struct { func (x *PrepareProviderConfig_Response) Reset() { *x = PrepareProviderConfig_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[38] + mi := &file_tfplugin5_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2022,7 +2425,7 @@ func (x *PrepareProviderConfig_Response) String() string { func (*PrepareProviderConfig_Response) ProtoMessage() {} func (x *PrepareProviderConfig_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[38] + mi := &file_tfplugin5_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2035,7 +2438,7 @@ func (x *PrepareProviderConfig_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use PrepareProviderConfig_Response.ProtoReflect.Descriptor instead. 
func (*PrepareProviderConfig_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{9, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{10, 1} } func (x *PrepareProviderConfig_Response) GetPreparedConfig() *DynamicValue { @@ -2080,7 +2483,7 @@ type UpgradeResourceState_Request struct { func (x *UpgradeResourceState_Request) Reset() { *x = UpgradeResourceState_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[39] + mi := &file_tfplugin5_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2093,7 +2496,7 @@ func (x *UpgradeResourceState_Request) String() string { func (*UpgradeResourceState_Request) ProtoMessage() {} func (x *UpgradeResourceState_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[39] + mi := &file_tfplugin5_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2106,7 +2509,7 @@ func (x *UpgradeResourceState_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use UpgradeResourceState_Request.ProtoReflect.Descriptor instead. 
func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{10, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{11, 0} } func (x *UpgradeResourceState_Request) GetTypeName() string { @@ -2148,7 +2551,7 @@ type UpgradeResourceState_Response struct { func (x *UpgradeResourceState_Response) Reset() { *x = UpgradeResourceState_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[40] + mi := &file_tfplugin5_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2161,7 +2564,7 @@ func (x *UpgradeResourceState_Response) String() string { func (*UpgradeResourceState_Response) ProtoMessage() {} func (x *UpgradeResourceState_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[40] + mi := &file_tfplugin5_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2174,7 +2577,7 @@ func (x *UpgradeResourceState_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use UpgradeResourceState_Response.ProtoReflect.Descriptor instead. 
func (*UpgradeResourceState_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{10, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{11, 1} } func (x *UpgradeResourceState_Response) GetUpgradedState() *DynamicValue { @@ -2203,7 +2606,7 @@ type ValidateResourceTypeConfig_Request struct { func (x *ValidateResourceTypeConfig_Request) Reset() { *x = ValidateResourceTypeConfig_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[41] + mi := &file_tfplugin5_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2216,7 +2619,7 @@ func (x *ValidateResourceTypeConfig_Request) String() string { func (*ValidateResourceTypeConfig_Request) ProtoMessage() {} func (x *ValidateResourceTypeConfig_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[41] + mi := &file_tfplugin5_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2229,7 +2632,7 @@ func (x *ValidateResourceTypeConfig_Request) ProtoReflect() protoreflect.Message // Deprecated: Use ValidateResourceTypeConfig_Request.ProtoReflect.Descriptor instead. 
func (*ValidateResourceTypeConfig_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{11, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{12, 0} } func (x *ValidateResourceTypeConfig_Request) GetTypeName() string { @@ -2257,7 +2660,7 @@ type ValidateResourceTypeConfig_Response struct { func (x *ValidateResourceTypeConfig_Response) Reset() { *x = ValidateResourceTypeConfig_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[42] + mi := &file_tfplugin5_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2270,7 +2673,7 @@ func (x *ValidateResourceTypeConfig_Response) String() string { func (*ValidateResourceTypeConfig_Response) ProtoMessage() {} func (x *ValidateResourceTypeConfig_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[42] + mi := &file_tfplugin5_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2283,7 +2686,7 @@ func (x *ValidateResourceTypeConfig_Response) ProtoReflect() protoreflect.Messag // Deprecated: Use ValidateResourceTypeConfig_Response.ProtoReflect.Descriptor instead. 
func (*ValidateResourceTypeConfig_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{11, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{12, 1} } func (x *ValidateResourceTypeConfig_Response) GetDiagnostics() []*Diagnostic { @@ -2305,7 +2708,7 @@ type ValidateDataSourceConfig_Request struct { func (x *ValidateDataSourceConfig_Request) Reset() { *x = ValidateDataSourceConfig_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[43] + mi := &file_tfplugin5_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2318,7 +2721,7 @@ func (x *ValidateDataSourceConfig_Request) String() string { func (*ValidateDataSourceConfig_Request) ProtoMessage() {} func (x *ValidateDataSourceConfig_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[43] + mi := &file_tfplugin5_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2331,7 +2734,7 @@ func (x *ValidateDataSourceConfig_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateDataSourceConfig_Request.ProtoReflect.Descriptor instead. 
func (*ValidateDataSourceConfig_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{12, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{13, 0} } func (x *ValidateDataSourceConfig_Request) GetTypeName() string { @@ -2359,7 +2762,7 @@ type ValidateDataSourceConfig_Response struct { func (x *ValidateDataSourceConfig_Response) Reset() { *x = ValidateDataSourceConfig_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[44] + mi := &file_tfplugin5_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2372,7 +2775,7 @@ func (x *ValidateDataSourceConfig_Response) String() string { func (*ValidateDataSourceConfig_Response) ProtoMessage() {} func (x *ValidateDataSourceConfig_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[44] + mi := &file_tfplugin5_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2385,7 +2788,7 @@ func (x *ValidateDataSourceConfig_Response) ProtoReflect() protoreflect.Message // Deprecated: Use ValidateDataSourceConfig_Response.ProtoReflect.Descriptor instead. 
func (*ValidateDataSourceConfig_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{12, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{13, 1} } func (x *ValidateDataSourceConfig_Response) GetDiagnostics() []*Diagnostic { @@ -2407,7 +2810,7 @@ type Configure_Request struct { func (x *Configure_Request) Reset() { *x = Configure_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[45] + mi := &file_tfplugin5_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2420,7 +2823,7 @@ func (x *Configure_Request) String() string { func (*Configure_Request) ProtoMessage() {} func (x *Configure_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[45] + mi := &file_tfplugin5_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2433,7 +2836,7 @@ func (x *Configure_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use Configure_Request.ProtoReflect.Descriptor instead. 
func (*Configure_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{13, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{14, 0} } func (x *Configure_Request) GetTerraformVersion() string { @@ -2461,7 +2864,7 @@ type Configure_Response struct { func (x *Configure_Response) Reset() { *x = Configure_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[46] + mi := &file_tfplugin5_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2474,7 +2877,7 @@ func (x *Configure_Response) String() string { func (*Configure_Response) ProtoMessage() {} func (x *Configure_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[46] + mi := &file_tfplugin5_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2487,7 +2890,7 @@ func (x *Configure_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use Configure_Response.ProtoReflect.Descriptor instead. 
func (*Configure_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{13, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{14, 1} } func (x *Configure_Response) GetDiagnostics() []*Diagnostic { @@ -2519,7 +2922,7 @@ type ReadResource_Request struct { func (x *ReadResource_Request) Reset() { *x = ReadResource_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[47] + mi := &file_tfplugin5_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2532,7 +2935,7 @@ func (x *ReadResource_Request) String() string { func (*ReadResource_Request) ProtoMessage() {} func (x *ReadResource_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[47] + mi := &file_tfplugin5_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2545,7 +2948,7 @@ func (x *ReadResource_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadResource_Request.ProtoReflect.Descriptor instead. 
func (*ReadResource_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{14, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{15, 0} } func (x *ReadResource_Request) GetTypeName() string { @@ -2589,7 +2992,7 @@ type ReadResource_Response struct { func (x *ReadResource_Response) Reset() { *x = ReadResource_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[48] + mi := &file_tfplugin5_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2602,7 +3005,7 @@ func (x *ReadResource_Response) String() string { func (*ReadResource_Response) ProtoMessage() {} func (x *ReadResource_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[48] + mi := &file_tfplugin5_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2615,7 +3018,7 @@ func (x *ReadResource_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadResource_Response.ProtoReflect.Descriptor instead. 
func (*ReadResource_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{14, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{15, 1} } func (x *ReadResource_Response) GetNewState() *DynamicValue { @@ -2655,7 +3058,7 @@ type PlanResourceChange_Request struct { func (x *PlanResourceChange_Request) Reset() { *x = PlanResourceChange_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[49] + mi := &file_tfplugin5_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2668,7 +3071,7 @@ func (x *PlanResourceChange_Request) String() string { func (*PlanResourceChange_Request) ProtoMessage() {} func (x *PlanResourceChange_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[49] + mi := &file_tfplugin5_proto_msgTypes[56] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2681,7 +3084,7 @@ func (x *PlanResourceChange_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use PlanResourceChange_Request.ProtoReflect.Descriptor instead. 
func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{15, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{16, 0} } func (x *PlanResourceChange_Request) GetTypeName() string { @@ -2752,7 +3155,7 @@ type PlanResourceChange_Response struct { func (x *PlanResourceChange_Response) Reset() { *x = PlanResourceChange_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[50] + mi := &file_tfplugin5_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2765,7 +3168,7 @@ func (x *PlanResourceChange_Response) String() string { func (*PlanResourceChange_Response) ProtoMessage() {} func (x *PlanResourceChange_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[50] + mi := &file_tfplugin5_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2778,7 +3181,7 @@ func (x *PlanResourceChange_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use PlanResourceChange_Response.ProtoReflect.Descriptor instead. 
func (*PlanResourceChange_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{15, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{16, 1} } func (x *PlanResourceChange_Response) GetPlannedState() *DynamicValue { @@ -2832,7 +3235,7 @@ type ApplyResourceChange_Request struct { func (x *ApplyResourceChange_Request) Reset() { *x = ApplyResourceChange_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[51] + mi := &file_tfplugin5_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2845,7 +3248,7 @@ func (x *ApplyResourceChange_Request) String() string { func (*ApplyResourceChange_Request) ProtoMessage() {} func (x *ApplyResourceChange_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[51] + mi := &file_tfplugin5_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2858,7 +3261,7 @@ func (x *ApplyResourceChange_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ApplyResourceChange_Request.ProtoReflect.Descriptor instead. 
func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{16, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{17, 0} } func (x *ApplyResourceChange_Request) GetTypeName() string { @@ -2928,7 +3331,7 @@ type ApplyResourceChange_Response struct { func (x *ApplyResourceChange_Response) Reset() { *x = ApplyResourceChange_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[52] + mi := &file_tfplugin5_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2941,7 +3344,7 @@ func (x *ApplyResourceChange_Response) String() string { func (*ApplyResourceChange_Response) ProtoMessage() {} func (x *ApplyResourceChange_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[52] + mi := &file_tfplugin5_proto_msgTypes[59] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2954,7 +3357,7 @@ func (x *ApplyResourceChange_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ApplyResourceChange_Response.ProtoReflect.Descriptor instead. 
func (*ApplyResourceChange_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{16, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{17, 1} } func (x *ApplyResourceChange_Response) GetNewState() *DynamicValue { @@ -2997,7 +3400,7 @@ type ImportResourceState_Request struct { func (x *ImportResourceState_Request) Reset() { *x = ImportResourceState_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[53] + mi := &file_tfplugin5_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3010,7 +3413,7 @@ func (x *ImportResourceState_Request) String() string { func (*ImportResourceState_Request) ProtoMessage() {} func (x *ImportResourceState_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[53] + mi := &file_tfplugin5_proto_msgTypes[60] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3023,7 +3426,7 @@ func (x *ImportResourceState_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ImportResourceState_Request.ProtoReflect.Descriptor instead. 
func (*ImportResourceState_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{17, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{18, 0} } func (x *ImportResourceState_Request) GetTypeName() string { @@ -3053,7 +3456,7 @@ type ImportResourceState_ImportedResource struct { func (x *ImportResourceState_ImportedResource) Reset() { *x = ImportResourceState_ImportedResource{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[54] + mi := &file_tfplugin5_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3066,7 +3469,7 @@ func (x *ImportResourceState_ImportedResource) String() string { func (*ImportResourceState_ImportedResource) ProtoMessage() {} func (x *ImportResourceState_ImportedResource) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[54] + mi := &file_tfplugin5_proto_msgTypes[61] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3079,7 +3482,7 @@ func (x *ImportResourceState_ImportedResource) ProtoReflect() protoreflect.Messa // Deprecated: Use ImportResourceState_ImportedResource.ProtoReflect.Descriptor instead. 
func (*ImportResourceState_ImportedResource) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{17, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{18, 1} } func (x *ImportResourceState_ImportedResource) GetTypeName() string { @@ -3115,7 +3518,7 @@ type ImportResourceState_Response struct { func (x *ImportResourceState_Response) Reset() { *x = ImportResourceState_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[55] + mi := &file_tfplugin5_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3128,7 +3531,7 @@ func (x *ImportResourceState_Response) String() string { func (*ImportResourceState_Response) ProtoMessage() {} func (x *ImportResourceState_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[55] + mi := &file_tfplugin5_proto_msgTypes[62] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3141,7 +3544,7 @@ func (x *ImportResourceState_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ImportResourceState_Response.ProtoReflect.Descriptor instead. 
func (*ImportResourceState_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{17, 2} + return file_tfplugin5_proto_rawDescGZIP(), []int{18, 2} } func (x *ImportResourceState_Response) GetImportedResources() []*ImportResourceState_ImportedResource { @@ -3171,7 +3574,7 @@ type ReadDataSource_Request struct { func (x *ReadDataSource_Request) Reset() { *x = ReadDataSource_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[56] + mi := &file_tfplugin5_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3184,7 +3587,7 @@ func (x *ReadDataSource_Request) String() string { func (*ReadDataSource_Request) ProtoMessage() {} func (x *ReadDataSource_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[56] + mi := &file_tfplugin5_proto_msgTypes[63] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3197,7 +3600,7 @@ func (x *ReadDataSource_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadDataSource_Request.ProtoReflect.Descriptor instead. 
func (*ReadDataSource_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{18, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{19, 0} } func (x *ReadDataSource_Request) GetTypeName() string { @@ -3233,7 +3636,7 @@ type ReadDataSource_Response struct { func (x *ReadDataSource_Response) Reset() { *x = ReadDataSource_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[57] + mi := &file_tfplugin5_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3246,7 +3649,7 @@ func (x *ReadDataSource_Response) String() string { func (*ReadDataSource_Response) ProtoMessage() {} func (x *ReadDataSource_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[57] + mi := &file_tfplugin5_proto_msgTypes[64] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3259,7 +3662,7 @@ func (x *ReadDataSource_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadDataSource_Response.ProtoReflect.Descriptor instead. 
func (*ReadDataSource_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{18, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{19, 1} } func (x *ReadDataSource_Response) GetState() *DynamicValue { @@ -3285,7 +3688,7 @@ type GetProvisionerSchema_Request struct { func (x *GetProvisionerSchema_Request) Reset() { *x = GetProvisionerSchema_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[58] + mi := &file_tfplugin5_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3298,7 +3701,7 @@ func (x *GetProvisionerSchema_Request) String() string { func (*GetProvisionerSchema_Request) ProtoMessage() {} func (x *GetProvisionerSchema_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[58] + mi := &file_tfplugin5_proto_msgTypes[65] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3311,7 +3714,7 @@ func (x *GetProvisionerSchema_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProvisionerSchema_Request.ProtoReflect.Descriptor instead. 
func (*GetProvisionerSchema_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{19, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{20, 0} } type GetProvisionerSchema_Response struct { @@ -3326,7 +3729,7 @@ type GetProvisionerSchema_Response struct { func (x *GetProvisionerSchema_Response) Reset() { *x = GetProvisionerSchema_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[59] + mi := &file_tfplugin5_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3339,7 +3742,7 @@ func (x *GetProvisionerSchema_Response) String() string { func (*GetProvisionerSchema_Response) ProtoMessage() {} func (x *GetProvisionerSchema_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[59] + mi := &file_tfplugin5_proto_msgTypes[66] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3352,7 +3755,7 @@ func (x *GetProvisionerSchema_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProvisionerSchema_Response.ProtoReflect.Descriptor instead. 
func (*GetProvisionerSchema_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{19, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{20, 1} } func (x *GetProvisionerSchema_Response) GetProvisioner() *Schema { @@ -3380,7 +3783,7 @@ type ValidateProvisionerConfig_Request struct { func (x *ValidateProvisionerConfig_Request) Reset() { *x = ValidateProvisionerConfig_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[60] + mi := &file_tfplugin5_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3393,7 +3796,7 @@ func (x *ValidateProvisionerConfig_Request) String() string { func (*ValidateProvisionerConfig_Request) ProtoMessage() {} func (x *ValidateProvisionerConfig_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[60] + mi := &file_tfplugin5_proto_msgTypes[67] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3406,7 +3809,7 @@ func (x *ValidateProvisionerConfig_Request) ProtoReflect() protoreflect.Message // Deprecated: Use ValidateProvisionerConfig_Request.ProtoReflect.Descriptor instead. 
func (*ValidateProvisionerConfig_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{20, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{21, 0} } func (x *ValidateProvisionerConfig_Request) GetConfig() *DynamicValue { @@ -3427,7 +3830,7 @@ type ValidateProvisionerConfig_Response struct { func (x *ValidateProvisionerConfig_Response) Reset() { *x = ValidateProvisionerConfig_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[61] + mi := &file_tfplugin5_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3440,7 +3843,7 @@ func (x *ValidateProvisionerConfig_Response) String() string { func (*ValidateProvisionerConfig_Response) ProtoMessage() {} func (x *ValidateProvisionerConfig_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[61] + mi := &file_tfplugin5_proto_msgTypes[68] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3453,7 +3856,7 @@ func (x *ValidateProvisionerConfig_Response) ProtoReflect() protoreflect.Message // Deprecated: Use ValidateProvisionerConfig_Response.ProtoReflect.Descriptor instead. 
func (*ValidateProvisionerConfig_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{20, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{21, 1} } func (x *ValidateProvisionerConfig_Response) GetDiagnostics() []*Diagnostic { @@ -3475,7 +3878,7 @@ type ProvisionResource_Request struct { func (x *ProvisionResource_Request) Reset() { *x = ProvisionResource_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[62] + mi := &file_tfplugin5_proto_msgTypes[69] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3488,7 +3891,7 @@ func (x *ProvisionResource_Request) String() string { func (*ProvisionResource_Request) ProtoMessage() {} func (x *ProvisionResource_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[62] + mi := &file_tfplugin5_proto_msgTypes[69] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3501,7 +3904,7 @@ func (x *ProvisionResource_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ProvisionResource_Request.ProtoReflect.Descriptor instead. 
func (*ProvisionResource_Request) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{21, 0} + return file_tfplugin5_proto_rawDescGZIP(), []int{22, 0} } func (x *ProvisionResource_Request) GetConfig() *DynamicValue { @@ -3530,7 +3933,7 @@ type ProvisionResource_Response struct { func (x *ProvisionResource_Response) Reset() { *x = ProvisionResource_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin5_proto_msgTypes[63] + mi := &file_tfplugin5_proto_msgTypes[70] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3543,7 +3946,7 @@ func (x *ProvisionResource_Response) String() string { func (*ProvisionResource_Response) ProtoMessage() {} func (x *ProvisionResource_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin5_proto_msgTypes[63] + mi := &file_tfplugin5_proto_msgTypes[70] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3556,7 +3959,7 @@ func (x *ProvisionResource_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ProvisionResource_Response.ProtoReflect.Descriptor instead. 
func (*ProvisionResource_Response) Descriptor() ([]byte, []int) { - return file_tfplugin5_proto_rawDescGZIP(), []int{21, 1} + return file_tfplugin5_proto_rawDescGZIP(), []int{22, 1} } func (x *ProvisionResource_Response) GetOutput() string { @@ -3573,6 +3976,215 @@ func (x *ProvisionResource_Response) GetDiagnostics() []*Diagnostic { return nil } +type GetFunctions_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetFunctions_Request) Reset() { + *x = GetFunctions_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[71] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFunctions_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFunctions_Request) ProtoMessage() {} + +func (x *GetFunctions_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[71] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFunctions_Request.ProtoReflect.Descriptor instead. +func (*GetFunctions_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{23, 0} +} + +type GetFunctions_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // functions is a mapping of function names to definitions. + Functions map[string]*Function `protobuf:"bytes,1,rep,name=functions,proto3" json:"functions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // diagnostics is any warnings or errors. 
+ Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *GetFunctions_Response) Reset() { + *x = GetFunctions_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[72] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFunctions_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFunctions_Response) ProtoMessage() {} + +func (x *GetFunctions_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[72] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFunctions_Response.ProtoReflect.Descriptor instead. +func (*GetFunctions_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{23, 1} +} + +func (x *GetFunctions_Response) GetFunctions() map[string]*Function { + if x != nil { + return x.Functions + } + return nil +} + +func (x *GetFunctions_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type CallFunction_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // name is the name of the function being called. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // arguments is the data of each function argument value. 
+ Arguments []*DynamicValue `protobuf:"bytes,2,rep,name=arguments,proto3" json:"arguments,omitempty"` +} + +func (x *CallFunction_Request) Reset() { + *x = CallFunction_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[74] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CallFunction_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CallFunction_Request) ProtoMessage() {} + +func (x *CallFunction_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[74] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CallFunction_Request.ProtoReflect.Descriptor instead. +func (*CallFunction_Request) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{24, 0} +} + +func (x *CallFunction_Request) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CallFunction_Request) GetArguments() []*DynamicValue { + if x != nil { + return x.Arguments + } + return nil +} + +type CallFunction_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // result is result value after running the function logic. + Result *DynamicValue `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` + // diagnostics is any warnings or errors from the function logic. 
+ Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *CallFunction_Response) Reset() { + *x = CallFunction_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin5_proto_msgTypes[75] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CallFunction_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CallFunction_Response) ProtoMessage() {} + +func (x *CallFunction_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin5_proto_msgTypes[75] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CallFunction_Response.ProtoReflect.Descriptor instead. +func (*CallFunction_Response) Descriptor() ([]byte, []int) { + return file_tfplugin5_proto_rawDescGZIP(), []int{24, 1} +} + +func (x *CallFunction_Response) GetResult() *DynamicValue { + if x != nil { + return x.Result + } + return nil +} + +func (x *CallFunction_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + var File_tfplugin5_proto protoreflect.FileDescriptor var file_tfplugin5_proto_rawDesc = []byte{ @@ -3581,7 +4193,7 @@ var file_tfplugin5_proto_rawDesc = []byte{ 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x73, 0x67, 0x70, 0x61, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x73, 0x67, 0x70, 0x61, 0x63, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x22, 0xe3, 0x01, 0x0a, 0x0a, 0x44, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x22, 0xab, 0x02, 0x0a, 0x0a, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x73, 
0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, @@ -3592,414 +4204,507 @@ var file_tfplugin5_proto_rawDesc = []byte{ 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x36, 0x0a, 0x09, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x50, 0x61, 0x74, 0x68, 0x52, 0x09, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x22, - 0x2f, 0x0a, 0x08, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x0b, 0x0a, 0x07, 0x49, - 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, - 0x52, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x57, 0x41, 0x52, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, - 0x22, 0xdc, 0x01, 0x0a, 0x0d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, - 0x74, 0x68, 0x12, 0x33, 0x0a, 0x05, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x2e, 0x53, 0x74, 0x65, 0x70, - 0x52, 0x05, 0x73, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x95, 0x01, 0x0a, 0x04, 0x53, 0x74, 0x65, 0x70, - 0x12, 0x27, 0x0a, 0x0e, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0d, 0x61, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x65, 0x6c, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x10, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x4b, 0x65, 0x79, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 
0x28, 0x0a, 0x0f, 0x65, 0x6c, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, - 0x49, 0x6e, 0x74, 0x42, 0x0a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, - 0x33, 0x0a, 0x04, 0x53, 0x74, 0x6f, 0x70, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x20, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, - 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, - 0x72, 0x72, 0x6f, 0x72, 0x22, 0x96, 0x01, 0x0a, 0x08, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x07, 0x66, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x35, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x46, 0x6c, 0x61, 0x74, - 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x66, 0x6c, 0x61, 0x74, 0x6d, 0x61, - 0x70, 0x1a, 0x3a, 0x0a, 0x0c, 0x46, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xcc, 0x07, - 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, - 
0x68, 0x65, 0x6d, 0x61, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, - 0x6b, 0x1a, 0xa2, 0x02, 0x0a, 0x05, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, - 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, - 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x41, 0x74, 0x74, - 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, - 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x50, 0x61, 0x74, 0x68, 0x52, 0x09, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, + 0x30, 0x0a, 0x11, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x72, 0x67, 0x75, + 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x10, 0x66, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x88, 0x01, + 0x01, 0x22, 0x2f, 0x0a, 0x08, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x0b, 0x0a, + 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x57, 0x41, 0x52, 0x4e, 0x49, 0x4e, 0x47, + 0x10, 0x02, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x22, 0xdc, 0x01, 0x0a, 0x0d, 0x41, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x33, 0x0a, 0x05, 0x73, 0x74, + 0x65, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 
0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, + 0x61, 0x74, 0x68, 0x2e, 0x53, 0x74, 0x65, 0x70, 0x52, 0x05, 0x73, 0x74, 0x65, 0x70, 0x73, 0x1a, + 0x95, 0x01, 0x0a, 0x04, 0x53, 0x74, 0x65, 0x70, 0x12, 0x27, 0x0a, 0x0e, 0x61, 0x74, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x0d, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x10, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x53, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x12, 0x28, 0x0a, 0x0f, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x69, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x49, 0x6e, 0x74, 0x42, 0x0a, 0x0a, 0x08, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, 0x33, 0x0a, 0x04, 0x53, 0x74, 0x6f, 0x70, 0x1a, + 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x0a, 0x08, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x96, 0x01, 0x0a, + 0x08, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6a, 0x73, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, + 0x07, 0x66, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x2e, 0x46, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x07, 0x66, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, 0x1a, 0x3a, 0x0a, 
0x0c, 0x46, 0x6c, 0x61, + 0x74, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xcc, 0x07, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x05, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x1a, 0xa2, 0x02, 0x0a, 0x05, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, + 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0a, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x0b, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x2e, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x0a, + 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 
0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x10, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1e, + 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x1a, 0xa9, + 0x02, 0x0a, 0x09, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, + 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, + 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x1a, + 0x0a, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x65, + 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, + 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 
0x6b, 0x69, 0x6e, 0x64, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x1a, 0xa7, 0x02, 0x0a, 0x0b, 0x4e, + 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, + 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, + 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x43, 0x0a, 0x07, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4e, 0x65, 0x73, 0x74, 0x65, - 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x79, 0x70, - 0x65, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 
0x69, - 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, - 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, - 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x1a, 0xa9, 0x02, 0x0a, 0x09, 0x41, 0x74, 0x74, 0x72, 0x69, - 0x62, 0x75, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, - 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, - 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, - 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, - 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, - 0x12, 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, - 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, - 0x64, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, - 0x6e, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, - 0x18, 0x09, 0x20, 0x01, 0x28, 
0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, - 0x65, 0x64, 0x1a, 0xa7, 0x02, 0x0a, 0x0b, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, - 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x2d, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x43, - 0x0a, 0x07, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x2e, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x4e, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x07, 0x6e, 0x65, 0x73, 0x74, - 0x69, 0x6e, 0x67, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, - 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x4d, 0x0a, - 0x0b, 0x4e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0b, 0x0a, 0x07, - 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x49, 0x4e, - 0x47, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, - 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x4d, 0x41, 0x50, 0x10, - 0x04, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x05, 0x22, 0x78, 0x0a, 0x12, - 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 
0x69, 0x6c, 0x69, 0x74, 0x69, - 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x64, 0x65, 0x73, 0x74, 0x72, - 0x6f, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x6c, 0x61, 0x6e, 0x44, 0x65, - 0x73, 0x74, 0x72, 0x6f, 0x79, 0x12, 0x3f, 0x0a, 0x1c, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x72, 0x6f, - 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x67, 0x65, 0x74, - 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xa7, 0x03, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0xa8, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, - 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, - 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, - 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x37, - 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, - 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, - 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x4c, 0x0a, 0x0c, 0x64, 0x61, 0x74, 0x61, 0x5f, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, - 0x61, 
0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x4e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, + 0x64, 0x65, 0x52, 0x07, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1b, 0x0a, 0x09, 0x6d, + 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, + 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, + 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, + 0x49, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x4d, 0x0a, 0x0b, 0x4e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, + 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, + 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, + 0x04, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x03, + 0x12, 0x07, 0x0a, 0x03, 0x4d, 0x41, 0x50, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x52, 0x4f, + 0x55, 0x50, 0x10, 0x05, 0x22, 0x78, 0x0a, 0x12, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, + 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x6c, + 0x61, 0x6e, 0x5f, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x70, 0x6c, 0x61, 0x6e, 0x44, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x12, 0x3f, 0x0a, + 0x1c, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x19, 0x67, 0x65, 
0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0x8e, + 0x05, 0x0a, 0x08, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0a, 0x70, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x0a, + 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x4c, 0x0a, 0x12, 0x76, 0x61, + 0x72, 0x69, 0x61, 0x64, 0x69, 0x63, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x11, 0x76, 0x61, 0x72, 0x69, 0x61, 0x64, 0x69, 0x63, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x32, 0x0a, 0x06, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x12, 0x18, 0x0a, 0x07, + 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 
0x35, 0x2e, 0x53, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, + 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x1a, 0xf3, 0x01, 0x0a, 0x09, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x28, 0x0a, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x61, 0x6c, 0x6c, + 0x6f, 0x77, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x61, + 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x5f, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, + 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, + 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, + 0x64, 0x1a, 0x1c, 
0x0a, 0x06, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, + 0x96, 0x04, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, + 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0xef, 0x02, 0x0a, 0x08, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x69, 0x65, 0x73, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, + 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, + 0x12, 0x4c, 0x0a, 0x0c, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, + 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x45, + 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, + 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 
0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x31, 0x0a, 0x12, - 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, - 0x2f, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x22, 0xa0, 0x05, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0xff, 0x04, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, - 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x65, 0x0a, - 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 
- 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x73, 0x12, 0x6c, 0x0a, 0x13, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x3c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, - 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, - 0x11, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x26, 0x0a, 0x10, + 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x1a, 0x31, 0x0a, 0x12, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, + 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 
0x65, 0x1a, 0x2f, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xc7, 0x06, 0x0a, 0x11, 0x47, 0x65, 0x74, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x1a, 0x09, + 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0xa6, 0x06, 0x0a, 0x08, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x65, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x3a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x6c, 0x0a, 0x13, + 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 
0x63, 0x68, 0x65, 0x6d, + 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x12, 0x36, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, + 0x6d, 0x65, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0c, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x13, 0x73, + 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, + 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, + 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, + 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x09, 0x66, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, + 0x55, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x73, 
0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x57, 0x0a, 0x16, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, + 0x51, 0x0a, 0x0e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0xdb, 0x01, 0x0a, 0x15, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x3a, 0x0a, 0x07, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 
0x1a, 0x85, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x0f, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, + 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, + 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, + 0x22, 0x90, 0x02, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x72, 0x0a, 0x07, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x09, 0x72, + 0x61, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x08, 0x72, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x83, 0x01, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0e, 0x75, 0x70, + 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 
0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x75, 0x70, 0x67, + 0x72, 0x61, 0x64, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x22, 0xba, 0x01, 0x0a, 0x1a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, + 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, + 0x22, 0xb8, 0x01, 0x0a, 0x18, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, + 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, + 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, - 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x36, 0x0a, 0x0d, 0x70, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, - 0x65, 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x61, - 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, - 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, - 0x69, 0x65, 0x73, 0x1a, 0x55, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, - 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 
0x0b, 0x32, 0x11, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, - 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x57, 0x0a, 0x16, 0x44, 0x61, - 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0xdb, 0x01, 0x0a, 0x15, 0x50, 0x72, 0x65, 0x70, 0x61, 0x72, 0x65, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x3a, 0x0a, - 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xb9, 0x01, 0x0a, 0x09, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x1a, 0x67, 0x0a, 0x07, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, + 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x10, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, + 0x0a, 
0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, + 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0xbc, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x85, 0x01, 0x0a, 0x08, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x0f, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, - 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, - 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0e, 0x70, 0x72, 0x65, 0x70, 0x61, 0x72, - 0x65, 0x64, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, - 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x73, 0x22, 0x90, 0x02, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 
0x74, 0x65, 0x1a, 0x72, 0x0a, 0x07, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x09, - 0x72, 0x61, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x61, 0x77, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x52, 0x08, 0x72, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x83, - 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0e, 0x75, - 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, + 0x65, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x93, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 
0x64, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x22, 0xf2, 0x04, + 0x0a, 0x12, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, + 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xbb, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, + 0x0b, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, + 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x45, 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x70, 0x6f, + 0x73, 0x65, 0x64, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, - 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x75, 0x70, - 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, - 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, - 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x73, 
0x22, 0xba, 0x01, 0x0a, 0x1a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, - 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x70, 0x72, + 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x4e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, + 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, + 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x23, 0x0a, 0x0d, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x50, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, - 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 
0x6f, 0x73, 0x74, 0x69, 0x63, - 0x73, 0x22, 0xb8, 0x01, 0x0a, 0x18, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, - 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, - 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, - 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, + 0x74, 0x61, 0x1a, 0x9d, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, - 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xb9, 0x01, 0x0a, - 0x09, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x1a, 0x67, 0x0a, 0x07, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, - 0x72, 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x10, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x56, 0x65, 0x72, 0x73, 0x69, - 0x6f, 
0x6e, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, - 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, - 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, - 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xe3, 0x02, 0x0a, 0x0c, 0x52, 0x65, 0x61, - 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0xbc, 0x01, 0x0a, 0x07, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, - 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, - 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 
0x61, 0x1a, 0x93, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, - 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x43, 0x0a, + 0x10, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, + 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, + 0x68, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x61, + 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, + 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, + 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x22, 0xf2, - 0x04, 0x0a, 0x12, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, - 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xbb, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, - 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, - 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, - 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, - 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x45, 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x70, - 0x6f, 0x73, 0x65, 0x64, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, + 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x10, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, + 0x65, 0x6d, 0x22, 0x92, 0x04, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xb6, 0x02, 0x0a, 0x07, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, + 0x0d, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, - 0x2e, 0x44, 0x79, 0x6e, 
0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x10, 0x70, - 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x4e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, - 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, - 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x50, 0x72, + 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, + 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, + 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, - 0x65, 0x74, 0x61, 0x1a, 0x9d, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 
0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x43, - 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x61, - 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, - 0x74, 0x68, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x52, 0x65, 0x70, 0x6c, - 0x61, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, - 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, - 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, - 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, - 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, - 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x53, 0x79, 0x73, - 0x74, 0x65, 0x6d, 0x22, 0x92, 0x04, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xb6, 0x02, 0x0a, 0x07, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, - 0x61, 0x74, 
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3c, - 0x0a, 0x0d, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, - 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x27, 0x0a, - 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, - 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, - 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, - 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, - 0x4d, 0x65, 0x74, 0x61, 0x1a, 0xc1, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, - 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, - 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 
0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, - 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, - 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, - 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, - 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, 0x79, - 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0xed, 0x02, 0x0a, 0x13, 0x49, 0x6d, 0x70, - 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x1a, 0x36, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x65, 0x74, 0x61, 0x1a, 0xc1, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, + 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, + 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 
0x65, 0x67, + 0x61, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, 0x79, 0x70, + 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0xed, 0x02, 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, + 0x36, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, + 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x1a, 0x78, 0x0a, 0x10, 0x49, 0x6d, 0x70, 0x6f, 0x72, + 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x1a, 0x78, 0x0a, 0x10, 0x49, 0x6d, 0x70, 0x6f, - 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x1b, 0x0a, 0x09, - 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, - 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, - 0x74, 0x65, 0x1a, 0xa3, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x5e, 0x0a, 0x12, 0x69, 
0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x49, 0x6d, 0x70, 0x6f, - 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x11, 0x69, 0x6d, - 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, - 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, - 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, - 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x9c, 0x02, 0x0a, 0x0e, 0x52, 0x65, 0x61, - 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x95, 0x01, 0x0a, 0x07, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, - 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, - 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, - 0x65, 0x74, 0x61, 0x1a, 0x72, 0x0a, 0x08, 0x52, 0x65, 0x73, 
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, - 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, + 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x1a, 0xa3, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, + 0x0a, 0x12, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, + 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x11, 0x69, 0x6d, 0x70, + 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, - 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x9b, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, - 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x1a, 
0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x78, 0x0a, 0x08, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x76, 0x69, - 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, - 0x0b, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x0b, - 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, - 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x19, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x1a, 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, - 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, - 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, - 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, - 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x9c, 0x02, 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, + 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x95, 0x01, 0x0a, 0x07, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 
0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, + 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, + 0x74, 0x61, 0x1a, 0x72, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x9b, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x50, 0x72, + 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x1a, + 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x78, 0x0a, 0x08, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, + 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0b, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x37, 
0x0a, 0x0b, 0x64, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x73, 0x0a, 0x07, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, - 0x5b, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, - 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x75, 0x74, - 0x70, 0x75, 0x74, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x74, 0x69, 0x63, 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x19, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x1a, 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, + 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x56, 0x61, 0x6c, 
0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x73, 0x22, 0xe5, 0x01, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0x73, 0x0a, 0x07, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, + 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x37, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x5b, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, + 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x81, 0x02, 0x0a, 0x0c, + 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 
0x6e, 0x73, 0x1a, 0x09, 0x0a, 0x07, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0xe5, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x1a, 0x51, 0x0a, 0x0e, + 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0xda, 0x01, 0x0a, 0x0c, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x54, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x35, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, + 
0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x61, 0x72, 0x67, + 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x74, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x2a, 0x25, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4d, 0x41, 0x52, 0x4b, 0x44, 0x4f, 0x57, - 0x4e, 0x10, 0x01, 0x32, 0xe7, 0x09, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x4e, 0x10, 0x01, 0x32, 0x8d, 0x0b, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, @@ -4074,40 +4779,50 @@ var file_tfplugin5_proto_rawDesc = []byte{ 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x39, 0x0a, 0x04, 0x53, 0x74, 0x6f, 0x70, 0x12, 0x17, 0x2e, 0x74, 0x66, 
0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, - 0x53, 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x86, 0x03, - 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x12, 0x5e, 0x0a, - 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x27, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, - 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, - 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, - 0x19, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, - 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x2c, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, + 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, + 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, + 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x35, 0x2e, 0x43, 0x61, 0x6c, 
0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x35, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x04, 0x53, 0x74, 0x6f, 0x70, + 0x12, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, + 0x70, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x32, 0x86, 0x03, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, + 0x6e, 0x65, 0x72, 0x12, 0x5e, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x12, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, + 0x6f, 0x6e, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x78, 0x0a, 0x19, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, - 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x62, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x76, 0x69, - 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 
0x12, 0x24, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, + 0x12, 0x2c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x72, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x62, 0x0a, + 0x11, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x12, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x39, 0x0a, 0x04, 0x53, - 0x74, 0x6f, 0x70, 0x12, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, - 0x53, 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x47, 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x74, - 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x2d, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2d, - 0x67, 0x6f, 0x2f, 0x74, 0x66, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x76, 0x35, 0x2f, 0x69, 0x6e, 0x74, - 0x65, 0x72, 
0x6e, 0x61, 0x6c, 0x2f, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, + 0x01, 0x12, 0x39, 0x0a, 0x04, 0x53, 0x74, 0x6f, 0x70, 0x12, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x35, 0x2e, 0x53, + 0x74, 0x6f, 0x70, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x47, 0x5a, 0x45, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, + 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x2d, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x74, 0x66, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x76, 0x35, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x35, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -4123,7 +4838,7 @@ func file_tfplugin5_proto_rawDescGZIP() []byte { } var file_tfplugin5_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_tfplugin5_proto_msgTypes = make([]protoimpl.MessageInfo, 64) +var file_tfplugin5_proto_msgTypes = make([]protoimpl.MessageInfo, 76) var file_tfplugin5_proto_goTypes = []interface{}{ (StringKind)(0), // 0: tfplugin5.StringKind (Diagnostic_Severity)(0), // 1: tfplugin5.Diagnostic.Severity @@ -4135,170 +4850,200 @@ var file_tfplugin5_proto_goTypes = []interface{}{ (*RawState)(nil), // 7: tfplugin5.RawState (*Schema)(nil), // 8: tfplugin5.Schema (*ServerCapabilities)(nil), // 9: tfplugin5.ServerCapabilities - (*GetMetadata)(nil), // 10: 
tfplugin5.GetMetadata - (*GetProviderSchema)(nil), // 11: tfplugin5.GetProviderSchema - (*PrepareProviderConfig)(nil), // 12: tfplugin5.PrepareProviderConfig - (*UpgradeResourceState)(nil), // 13: tfplugin5.UpgradeResourceState - (*ValidateResourceTypeConfig)(nil), // 14: tfplugin5.ValidateResourceTypeConfig - (*ValidateDataSourceConfig)(nil), // 15: tfplugin5.ValidateDataSourceConfig - (*Configure)(nil), // 16: tfplugin5.Configure - (*ReadResource)(nil), // 17: tfplugin5.ReadResource - (*PlanResourceChange)(nil), // 18: tfplugin5.PlanResourceChange - (*ApplyResourceChange)(nil), // 19: tfplugin5.ApplyResourceChange - (*ImportResourceState)(nil), // 20: tfplugin5.ImportResourceState - (*ReadDataSource)(nil), // 21: tfplugin5.ReadDataSource - (*GetProvisionerSchema)(nil), // 22: tfplugin5.GetProvisionerSchema - (*ValidateProvisionerConfig)(nil), // 23: tfplugin5.ValidateProvisionerConfig - (*ProvisionResource)(nil), // 24: tfplugin5.ProvisionResource - (*AttributePath_Step)(nil), // 25: tfplugin5.AttributePath.Step - (*Stop_Request)(nil), // 26: tfplugin5.Stop.Request - (*Stop_Response)(nil), // 27: tfplugin5.Stop.Response - nil, // 28: tfplugin5.RawState.FlatmapEntry - (*Schema_Block)(nil), // 29: tfplugin5.Schema.Block - (*Schema_Attribute)(nil), // 30: tfplugin5.Schema.Attribute - (*Schema_NestedBlock)(nil), // 31: tfplugin5.Schema.NestedBlock - (*GetMetadata_Request)(nil), // 32: tfplugin5.GetMetadata.Request - (*GetMetadata_Response)(nil), // 33: tfplugin5.GetMetadata.Response - (*GetMetadata_DataSourceMetadata)(nil), // 34: tfplugin5.GetMetadata.DataSourceMetadata - (*GetMetadata_ResourceMetadata)(nil), // 35: tfplugin5.GetMetadata.ResourceMetadata - (*GetProviderSchema_Request)(nil), // 36: tfplugin5.GetProviderSchema.Request - (*GetProviderSchema_Response)(nil), // 37: tfplugin5.GetProviderSchema.Response - nil, // 38: tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry - nil, // 39: tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry - 
(*PrepareProviderConfig_Request)(nil), // 40: tfplugin5.PrepareProviderConfig.Request - (*PrepareProviderConfig_Response)(nil), // 41: tfplugin5.PrepareProviderConfig.Response - (*UpgradeResourceState_Request)(nil), // 42: tfplugin5.UpgradeResourceState.Request - (*UpgradeResourceState_Response)(nil), // 43: tfplugin5.UpgradeResourceState.Response - (*ValidateResourceTypeConfig_Request)(nil), // 44: tfplugin5.ValidateResourceTypeConfig.Request - (*ValidateResourceTypeConfig_Response)(nil), // 45: tfplugin5.ValidateResourceTypeConfig.Response - (*ValidateDataSourceConfig_Request)(nil), // 46: tfplugin5.ValidateDataSourceConfig.Request - (*ValidateDataSourceConfig_Response)(nil), // 47: tfplugin5.ValidateDataSourceConfig.Response - (*Configure_Request)(nil), // 48: tfplugin5.Configure.Request - (*Configure_Response)(nil), // 49: tfplugin5.Configure.Response - (*ReadResource_Request)(nil), // 50: tfplugin5.ReadResource.Request - (*ReadResource_Response)(nil), // 51: tfplugin5.ReadResource.Response - (*PlanResourceChange_Request)(nil), // 52: tfplugin5.PlanResourceChange.Request - (*PlanResourceChange_Response)(nil), // 53: tfplugin5.PlanResourceChange.Response - (*ApplyResourceChange_Request)(nil), // 54: tfplugin5.ApplyResourceChange.Request - (*ApplyResourceChange_Response)(nil), // 55: tfplugin5.ApplyResourceChange.Response - (*ImportResourceState_Request)(nil), // 56: tfplugin5.ImportResourceState.Request - (*ImportResourceState_ImportedResource)(nil), // 57: tfplugin5.ImportResourceState.ImportedResource - (*ImportResourceState_Response)(nil), // 58: tfplugin5.ImportResourceState.Response - (*ReadDataSource_Request)(nil), // 59: tfplugin5.ReadDataSource.Request - (*ReadDataSource_Response)(nil), // 60: tfplugin5.ReadDataSource.Response - (*GetProvisionerSchema_Request)(nil), // 61: tfplugin5.GetProvisionerSchema.Request - (*GetProvisionerSchema_Response)(nil), // 62: tfplugin5.GetProvisionerSchema.Response - (*ValidateProvisionerConfig_Request)(nil), // 63: 
tfplugin5.ValidateProvisionerConfig.Request - (*ValidateProvisionerConfig_Response)(nil), // 64: tfplugin5.ValidateProvisionerConfig.Response - (*ProvisionResource_Request)(nil), // 65: tfplugin5.ProvisionResource.Request - (*ProvisionResource_Response)(nil), // 66: tfplugin5.ProvisionResource.Response + (*Function)(nil), // 10: tfplugin5.Function + (*GetMetadata)(nil), // 11: tfplugin5.GetMetadata + (*GetProviderSchema)(nil), // 12: tfplugin5.GetProviderSchema + (*PrepareProviderConfig)(nil), // 13: tfplugin5.PrepareProviderConfig + (*UpgradeResourceState)(nil), // 14: tfplugin5.UpgradeResourceState + (*ValidateResourceTypeConfig)(nil), // 15: tfplugin5.ValidateResourceTypeConfig + (*ValidateDataSourceConfig)(nil), // 16: tfplugin5.ValidateDataSourceConfig + (*Configure)(nil), // 17: tfplugin5.Configure + (*ReadResource)(nil), // 18: tfplugin5.ReadResource + (*PlanResourceChange)(nil), // 19: tfplugin5.PlanResourceChange + (*ApplyResourceChange)(nil), // 20: tfplugin5.ApplyResourceChange + (*ImportResourceState)(nil), // 21: tfplugin5.ImportResourceState + (*ReadDataSource)(nil), // 22: tfplugin5.ReadDataSource + (*GetProvisionerSchema)(nil), // 23: tfplugin5.GetProvisionerSchema + (*ValidateProvisionerConfig)(nil), // 24: tfplugin5.ValidateProvisionerConfig + (*ProvisionResource)(nil), // 25: tfplugin5.ProvisionResource + (*GetFunctions)(nil), // 26: tfplugin5.GetFunctions + (*CallFunction)(nil), // 27: tfplugin5.CallFunction + (*AttributePath_Step)(nil), // 28: tfplugin5.AttributePath.Step + (*Stop_Request)(nil), // 29: tfplugin5.Stop.Request + (*Stop_Response)(nil), // 30: tfplugin5.Stop.Response + nil, // 31: tfplugin5.RawState.FlatmapEntry + (*Schema_Block)(nil), // 32: tfplugin5.Schema.Block + (*Schema_Attribute)(nil), // 33: tfplugin5.Schema.Attribute + (*Schema_NestedBlock)(nil), // 34: tfplugin5.Schema.NestedBlock + (*Function_Parameter)(nil), // 35: tfplugin5.Function.Parameter + (*Function_Return)(nil), // 36: tfplugin5.Function.Return + 
(*GetMetadata_Request)(nil), // 37: tfplugin5.GetMetadata.Request + (*GetMetadata_Response)(nil), // 38: tfplugin5.GetMetadata.Response + (*GetMetadata_FunctionMetadata)(nil), // 39: tfplugin5.GetMetadata.FunctionMetadata + (*GetMetadata_DataSourceMetadata)(nil), // 40: tfplugin5.GetMetadata.DataSourceMetadata + (*GetMetadata_ResourceMetadata)(nil), // 41: tfplugin5.GetMetadata.ResourceMetadata + (*GetProviderSchema_Request)(nil), // 42: tfplugin5.GetProviderSchema.Request + (*GetProviderSchema_Response)(nil), // 43: tfplugin5.GetProviderSchema.Response + nil, // 44: tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry + nil, // 45: tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry + nil, // 46: tfplugin5.GetProviderSchema.Response.FunctionsEntry + (*PrepareProviderConfig_Request)(nil), // 47: tfplugin5.PrepareProviderConfig.Request + (*PrepareProviderConfig_Response)(nil), // 48: tfplugin5.PrepareProviderConfig.Response + (*UpgradeResourceState_Request)(nil), // 49: tfplugin5.UpgradeResourceState.Request + (*UpgradeResourceState_Response)(nil), // 50: tfplugin5.UpgradeResourceState.Response + (*ValidateResourceTypeConfig_Request)(nil), // 51: tfplugin5.ValidateResourceTypeConfig.Request + (*ValidateResourceTypeConfig_Response)(nil), // 52: tfplugin5.ValidateResourceTypeConfig.Response + (*ValidateDataSourceConfig_Request)(nil), // 53: tfplugin5.ValidateDataSourceConfig.Request + (*ValidateDataSourceConfig_Response)(nil), // 54: tfplugin5.ValidateDataSourceConfig.Response + (*Configure_Request)(nil), // 55: tfplugin5.Configure.Request + (*Configure_Response)(nil), // 56: tfplugin5.Configure.Response + (*ReadResource_Request)(nil), // 57: tfplugin5.ReadResource.Request + (*ReadResource_Response)(nil), // 58: tfplugin5.ReadResource.Response + (*PlanResourceChange_Request)(nil), // 59: tfplugin5.PlanResourceChange.Request + (*PlanResourceChange_Response)(nil), // 60: tfplugin5.PlanResourceChange.Response + (*ApplyResourceChange_Request)(nil), // 61: 
tfplugin5.ApplyResourceChange.Request + (*ApplyResourceChange_Response)(nil), // 62: tfplugin5.ApplyResourceChange.Response + (*ImportResourceState_Request)(nil), // 63: tfplugin5.ImportResourceState.Request + (*ImportResourceState_ImportedResource)(nil), // 64: tfplugin5.ImportResourceState.ImportedResource + (*ImportResourceState_Response)(nil), // 65: tfplugin5.ImportResourceState.Response + (*ReadDataSource_Request)(nil), // 66: tfplugin5.ReadDataSource.Request + (*ReadDataSource_Response)(nil), // 67: tfplugin5.ReadDataSource.Response + (*GetProvisionerSchema_Request)(nil), // 68: tfplugin5.GetProvisionerSchema.Request + (*GetProvisionerSchema_Response)(nil), // 69: tfplugin5.GetProvisionerSchema.Response + (*ValidateProvisionerConfig_Request)(nil), // 70: tfplugin5.ValidateProvisionerConfig.Request + (*ValidateProvisionerConfig_Response)(nil), // 71: tfplugin5.ValidateProvisionerConfig.Response + (*ProvisionResource_Request)(nil), // 72: tfplugin5.ProvisionResource.Request + (*ProvisionResource_Response)(nil), // 73: tfplugin5.ProvisionResource.Response + (*GetFunctions_Request)(nil), // 74: tfplugin5.GetFunctions.Request + (*GetFunctions_Response)(nil), // 75: tfplugin5.GetFunctions.Response + nil, // 76: tfplugin5.GetFunctions.Response.FunctionsEntry + (*CallFunction_Request)(nil), // 77: tfplugin5.CallFunction.Request + (*CallFunction_Response)(nil), // 78: tfplugin5.CallFunction.Response } var file_tfplugin5_proto_depIdxs = []int32{ 1, // 0: tfplugin5.Diagnostic.severity:type_name -> tfplugin5.Diagnostic.Severity 5, // 1: tfplugin5.Diagnostic.attribute:type_name -> tfplugin5.AttributePath - 25, // 2: tfplugin5.AttributePath.steps:type_name -> tfplugin5.AttributePath.Step - 28, // 3: tfplugin5.RawState.flatmap:type_name -> tfplugin5.RawState.FlatmapEntry - 29, // 4: tfplugin5.Schema.block:type_name -> tfplugin5.Schema.Block - 30, // 5: tfplugin5.Schema.Block.attributes:type_name -> tfplugin5.Schema.Attribute - 31, // 6: 
tfplugin5.Schema.Block.block_types:type_name -> tfplugin5.Schema.NestedBlock - 0, // 7: tfplugin5.Schema.Block.description_kind:type_name -> tfplugin5.StringKind - 0, // 8: tfplugin5.Schema.Attribute.description_kind:type_name -> tfplugin5.StringKind - 29, // 9: tfplugin5.Schema.NestedBlock.block:type_name -> tfplugin5.Schema.Block - 2, // 10: tfplugin5.Schema.NestedBlock.nesting:type_name -> tfplugin5.Schema.NestedBlock.NestingMode - 9, // 11: tfplugin5.GetMetadata.Response.server_capabilities:type_name -> tfplugin5.ServerCapabilities - 4, // 12: tfplugin5.GetMetadata.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 34, // 13: tfplugin5.GetMetadata.Response.data_sources:type_name -> tfplugin5.GetMetadata.DataSourceMetadata - 35, // 14: tfplugin5.GetMetadata.Response.resources:type_name -> tfplugin5.GetMetadata.ResourceMetadata - 8, // 15: tfplugin5.GetProviderSchema.Response.provider:type_name -> tfplugin5.Schema - 38, // 16: tfplugin5.GetProviderSchema.Response.resource_schemas:type_name -> tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry - 39, // 17: tfplugin5.GetProviderSchema.Response.data_source_schemas:type_name -> tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry - 4, // 18: tfplugin5.GetProviderSchema.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 8, // 19: tfplugin5.GetProviderSchema.Response.provider_meta:type_name -> tfplugin5.Schema - 9, // 20: tfplugin5.GetProviderSchema.Response.server_capabilities:type_name -> tfplugin5.ServerCapabilities - 8, // 21: tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry.value:type_name -> tfplugin5.Schema - 8, // 22: tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry.value:type_name -> tfplugin5.Schema - 3, // 23: tfplugin5.PrepareProviderConfig.Request.config:type_name -> tfplugin5.DynamicValue - 3, // 24: tfplugin5.PrepareProviderConfig.Response.prepared_config:type_name -> tfplugin5.DynamicValue - 4, // 25: 
tfplugin5.PrepareProviderConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 7, // 26: tfplugin5.UpgradeResourceState.Request.raw_state:type_name -> tfplugin5.RawState - 3, // 27: tfplugin5.UpgradeResourceState.Response.upgraded_state:type_name -> tfplugin5.DynamicValue - 4, // 28: tfplugin5.UpgradeResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 29: tfplugin5.ValidateResourceTypeConfig.Request.config:type_name -> tfplugin5.DynamicValue - 4, // 30: tfplugin5.ValidateResourceTypeConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 31: tfplugin5.ValidateDataSourceConfig.Request.config:type_name -> tfplugin5.DynamicValue - 4, // 32: tfplugin5.ValidateDataSourceConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 33: tfplugin5.Configure.Request.config:type_name -> tfplugin5.DynamicValue - 4, // 34: tfplugin5.Configure.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 35: tfplugin5.ReadResource.Request.current_state:type_name -> tfplugin5.DynamicValue - 3, // 36: tfplugin5.ReadResource.Request.provider_meta:type_name -> tfplugin5.DynamicValue - 3, // 37: tfplugin5.ReadResource.Response.new_state:type_name -> tfplugin5.DynamicValue - 4, // 38: tfplugin5.ReadResource.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 39: tfplugin5.PlanResourceChange.Request.prior_state:type_name -> tfplugin5.DynamicValue - 3, // 40: tfplugin5.PlanResourceChange.Request.proposed_new_state:type_name -> tfplugin5.DynamicValue - 3, // 41: tfplugin5.PlanResourceChange.Request.config:type_name -> tfplugin5.DynamicValue - 3, // 42: tfplugin5.PlanResourceChange.Request.provider_meta:type_name -> tfplugin5.DynamicValue - 3, // 43: tfplugin5.PlanResourceChange.Response.planned_state:type_name -> tfplugin5.DynamicValue - 5, // 44: tfplugin5.PlanResourceChange.Response.requires_replace:type_name -> tfplugin5.AttributePath - 4, // 45: tfplugin5.PlanResourceChange.Response.diagnostics:type_name -> 
tfplugin5.Diagnostic - 3, // 46: tfplugin5.ApplyResourceChange.Request.prior_state:type_name -> tfplugin5.DynamicValue - 3, // 47: tfplugin5.ApplyResourceChange.Request.planned_state:type_name -> tfplugin5.DynamicValue - 3, // 48: tfplugin5.ApplyResourceChange.Request.config:type_name -> tfplugin5.DynamicValue - 3, // 49: tfplugin5.ApplyResourceChange.Request.provider_meta:type_name -> tfplugin5.DynamicValue - 3, // 50: tfplugin5.ApplyResourceChange.Response.new_state:type_name -> tfplugin5.DynamicValue - 4, // 51: tfplugin5.ApplyResourceChange.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 52: tfplugin5.ImportResourceState.ImportedResource.state:type_name -> tfplugin5.DynamicValue - 57, // 53: tfplugin5.ImportResourceState.Response.imported_resources:type_name -> tfplugin5.ImportResourceState.ImportedResource - 4, // 54: tfplugin5.ImportResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 55: tfplugin5.ReadDataSource.Request.config:type_name -> tfplugin5.DynamicValue - 3, // 56: tfplugin5.ReadDataSource.Request.provider_meta:type_name -> tfplugin5.DynamicValue - 3, // 57: tfplugin5.ReadDataSource.Response.state:type_name -> tfplugin5.DynamicValue - 4, // 58: tfplugin5.ReadDataSource.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 8, // 59: tfplugin5.GetProvisionerSchema.Response.provisioner:type_name -> tfplugin5.Schema - 4, // 60: tfplugin5.GetProvisionerSchema.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 61: tfplugin5.ValidateProvisionerConfig.Request.config:type_name -> tfplugin5.DynamicValue - 4, // 62: tfplugin5.ValidateProvisionerConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 3, // 63: tfplugin5.ProvisionResource.Request.config:type_name -> tfplugin5.DynamicValue - 3, // 64: tfplugin5.ProvisionResource.Request.connection:type_name -> tfplugin5.DynamicValue - 4, // 65: tfplugin5.ProvisionResource.Response.diagnostics:type_name -> tfplugin5.Diagnostic - 32, // 66: 
tfplugin5.Provider.GetMetadata:input_type -> tfplugin5.GetMetadata.Request - 36, // 67: tfplugin5.Provider.GetSchema:input_type -> tfplugin5.GetProviderSchema.Request - 40, // 68: tfplugin5.Provider.PrepareProviderConfig:input_type -> tfplugin5.PrepareProviderConfig.Request - 44, // 69: tfplugin5.Provider.ValidateResourceTypeConfig:input_type -> tfplugin5.ValidateResourceTypeConfig.Request - 46, // 70: tfplugin5.Provider.ValidateDataSourceConfig:input_type -> tfplugin5.ValidateDataSourceConfig.Request - 42, // 71: tfplugin5.Provider.UpgradeResourceState:input_type -> tfplugin5.UpgradeResourceState.Request - 48, // 72: tfplugin5.Provider.Configure:input_type -> tfplugin5.Configure.Request - 50, // 73: tfplugin5.Provider.ReadResource:input_type -> tfplugin5.ReadResource.Request - 52, // 74: tfplugin5.Provider.PlanResourceChange:input_type -> tfplugin5.PlanResourceChange.Request - 54, // 75: tfplugin5.Provider.ApplyResourceChange:input_type -> tfplugin5.ApplyResourceChange.Request - 56, // 76: tfplugin5.Provider.ImportResourceState:input_type -> tfplugin5.ImportResourceState.Request - 59, // 77: tfplugin5.Provider.ReadDataSource:input_type -> tfplugin5.ReadDataSource.Request - 26, // 78: tfplugin5.Provider.Stop:input_type -> tfplugin5.Stop.Request - 61, // 79: tfplugin5.Provisioner.GetSchema:input_type -> tfplugin5.GetProvisionerSchema.Request - 63, // 80: tfplugin5.Provisioner.ValidateProvisionerConfig:input_type -> tfplugin5.ValidateProvisionerConfig.Request - 65, // 81: tfplugin5.Provisioner.ProvisionResource:input_type -> tfplugin5.ProvisionResource.Request - 26, // 82: tfplugin5.Provisioner.Stop:input_type -> tfplugin5.Stop.Request - 33, // 83: tfplugin5.Provider.GetMetadata:output_type -> tfplugin5.GetMetadata.Response - 37, // 84: tfplugin5.Provider.GetSchema:output_type -> tfplugin5.GetProviderSchema.Response - 41, // 85: tfplugin5.Provider.PrepareProviderConfig:output_type -> tfplugin5.PrepareProviderConfig.Response - 45, // 86: 
tfplugin5.Provider.ValidateResourceTypeConfig:output_type -> tfplugin5.ValidateResourceTypeConfig.Response - 47, // 87: tfplugin5.Provider.ValidateDataSourceConfig:output_type -> tfplugin5.ValidateDataSourceConfig.Response - 43, // 88: tfplugin5.Provider.UpgradeResourceState:output_type -> tfplugin5.UpgradeResourceState.Response - 49, // 89: tfplugin5.Provider.Configure:output_type -> tfplugin5.Configure.Response - 51, // 90: tfplugin5.Provider.ReadResource:output_type -> tfplugin5.ReadResource.Response - 53, // 91: tfplugin5.Provider.PlanResourceChange:output_type -> tfplugin5.PlanResourceChange.Response - 55, // 92: tfplugin5.Provider.ApplyResourceChange:output_type -> tfplugin5.ApplyResourceChange.Response - 58, // 93: tfplugin5.Provider.ImportResourceState:output_type -> tfplugin5.ImportResourceState.Response - 60, // 94: tfplugin5.Provider.ReadDataSource:output_type -> tfplugin5.ReadDataSource.Response - 27, // 95: tfplugin5.Provider.Stop:output_type -> tfplugin5.Stop.Response - 62, // 96: tfplugin5.Provisioner.GetSchema:output_type -> tfplugin5.GetProvisionerSchema.Response - 64, // 97: tfplugin5.Provisioner.ValidateProvisionerConfig:output_type -> tfplugin5.ValidateProvisionerConfig.Response - 66, // 98: tfplugin5.Provisioner.ProvisionResource:output_type -> tfplugin5.ProvisionResource.Response - 27, // 99: tfplugin5.Provisioner.Stop:output_type -> tfplugin5.Stop.Response - 83, // [83:100] is the sub-list for method output_type - 66, // [66:83] is the sub-list for method input_type - 66, // [66:66] is the sub-list for extension type_name - 66, // [66:66] is the sub-list for extension extendee - 0, // [0:66] is the sub-list for field type_name + 28, // 2: tfplugin5.AttributePath.steps:type_name -> tfplugin5.AttributePath.Step + 31, // 3: tfplugin5.RawState.flatmap:type_name -> tfplugin5.RawState.FlatmapEntry + 32, // 4: tfplugin5.Schema.block:type_name -> tfplugin5.Schema.Block + 35, // 5: tfplugin5.Function.parameters:type_name -> 
tfplugin5.Function.Parameter + 35, // 6: tfplugin5.Function.variadic_parameter:type_name -> tfplugin5.Function.Parameter + 36, // 7: tfplugin5.Function.return:type_name -> tfplugin5.Function.Return + 0, // 8: tfplugin5.Function.description_kind:type_name -> tfplugin5.StringKind + 33, // 9: tfplugin5.Schema.Block.attributes:type_name -> tfplugin5.Schema.Attribute + 34, // 10: tfplugin5.Schema.Block.block_types:type_name -> tfplugin5.Schema.NestedBlock + 0, // 11: tfplugin5.Schema.Block.description_kind:type_name -> tfplugin5.StringKind + 0, // 12: tfplugin5.Schema.Attribute.description_kind:type_name -> tfplugin5.StringKind + 32, // 13: tfplugin5.Schema.NestedBlock.block:type_name -> tfplugin5.Schema.Block + 2, // 14: tfplugin5.Schema.NestedBlock.nesting:type_name -> tfplugin5.Schema.NestedBlock.NestingMode + 0, // 15: tfplugin5.Function.Parameter.description_kind:type_name -> tfplugin5.StringKind + 9, // 16: tfplugin5.GetMetadata.Response.server_capabilities:type_name -> tfplugin5.ServerCapabilities + 4, // 17: tfplugin5.GetMetadata.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 40, // 18: tfplugin5.GetMetadata.Response.data_sources:type_name -> tfplugin5.GetMetadata.DataSourceMetadata + 41, // 19: tfplugin5.GetMetadata.Response.resources:type_name -> tfplugin5.GetMetadata.ResourceMetadata + 39, // 20: tfplugin5.GetMetadata.Response.functions:type_name -> tfplugin5.GetMetadata.FunctionMetadata + 8, // 21: tfplugin5.GetProviderSchema.Response.provider:type_name -> tfplugin5.Schema + 44, // 22: tfplugin5.GetProviderSchema.Response.resource_schemas:type_name -> tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry + 45, // 23: tfplugin5.GetProviderSchema.Response.data_source_schemas:type_name -> tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry + 4, // 24: tfplugin5.GetProviderSchema.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 8, // 25: tfplugin5.GetProviderSchema.Response.provider_meta:type_name -> tfplugin5.Schema + 9, // 26: 
tfplugin5.GetProviderSchema.Response.server_capabilities:type_name -> tfplugin5.ServerCapabilities + 46, // 27: tfplugin5.GetProviderSchema.Response.functions:type_name -> tfplugin5.GetProviderSchema.Response.FunctionsEntry + 8, // 28: tfplugin5.GetProviderSchema.Response.ResourceSchemasEntry.value:type_name -> tfplugin5.Schema + 8, // 29: tfplugin5.GetProviderSchema.Response.DataSourceSchemasEntry.value:type_name -> tfplugin5.Schema + 10, // 30: tfplugin5.GetProviderSchema.Response.FunctionsEntry.value:type_name -> tfplugin5.Function + 3, // 31: tfplugin5.PrepareProviderConfig.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 32: tfplugin5.PrepareProviderConfig.Response.prepared_config:type_name -> tfplugin5.DynamicValue + 4, // 33: tfplugin5.PrepareProviderConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 7, // 34: tfplugin5.UpgradeResourceState.Request.raw_state:type_name -> tfplugin5.RawState + 3, // 35: tfplugin5.UpgradeResourceState.Response.upgraded_state:type_name -> tfplugin5.DynamicValue + 4, // 36: tfplugin5.UpgradeResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 37: tfplugin5.ValidateResourceTypeConfig.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 38: tfplugin5.ValidateResourceTypeConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 39: tfplugin5.ValidateDataSourceConfig.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 40: tfplugin5.ValidateDataSourceConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 41: tfplugin5.Configure.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 42: tfplugin5.Configure.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 43: tfplugin5.ReadResource.Request.current_state:type_name -> tfplugin5.DynamicValue + 3, // 44: tfplugin5.ReadResource.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 45: tfplugin5.ReadResource.Response.new_state:type_name -> tfplugin5.DynamicValue + 4, // 46: 
tfplugin5.ReadResource.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 47: tfplugin5.PlanResourceChange.Request.prior_state:type_name -> tfplugin5.DynamicValue + 3, // 48: tfplugin5.PlanResourceChange.Request.proposed_new_state:type_name -> tfplugin5.DynamicValue + 3, // 49: tfplugin5.PlanResourceChange.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 50: tfplugin5.PlanResourceChange.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 51: tfplugin5.PlanResourceChange.Response.planned_state:type_name -> tfplugin5.DynamicValue + 5, // 52: tfplugin5.PlanResourceChange.Response.requires_replace:type_name -> tfplugin5.AttributePath + 4, // 53: tfplugin5.PlanResourceChange.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 54: tfplugin5.ApplyResourceChange.Request.prior_state:type_name -> tfplugin5.DynamicValue + 3, // 55: tfplugin5.ApplyResourceChange.Request.planned_state:type_name -> tfplugin5.DynamicValue + 3, // 56: tfplugin5.ApplyResourceChange.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 57: tfplugin5.ApplyResourceChange.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 58: tfplugin5.ApplyResourceChange.Response.new_state:type_name -> tfplugin5.DynamicValue + 4, // 59: tfplugin5.ApplyResourceChange.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 60: tfplugin5.ImportResourceState.ImportedResource.state:type_name -> tfplugin5.DynamicValue + 64, // 61: tfplugin5.ImportResourceState.Response.imported_resources:type_name -> tfplugin5.ImportResourceState.ImportedResource + 4, // 62: tfplugin5.ImportResourceState.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 63: tfplugin5.ReadDataSource.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 64: tfplugin5.ReadDataSource.Request.provider_meta:type_name -> tfplugin5.DynamicValue + 3, // 65: tfplugin5.ReadDataSource.Response.state:type_name -> tfplugin5.DynamicValue + 4, // 66: 
tfplugin5.ReadDataSource.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 8, // 67: tfplugin5.GetProvisionerSchema.Response.provisioner:type_name -> tfplugin5.Schema + 4, // 68: tfplugin5.GetProvisionerSchema.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 69: tfplugin5.ValidateProvisionerConfig.Request.config:type_name -> tfplugin5.DynamicValue + 4, // 70: tfplugin5.ValidateProvisionerConfig.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 3, // 71: tfplugin5.ProvisionResource.Request.config:type_name -> tfplugin5.DynamicValue + 3, // 72: tfplugin5.ProvisionResource.Request.connection:type_name -> tfplugin5.DynamicValue + 4, // 73: tfplugin5.ProvisionResource.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 76, // 74: tfplugin5.GetFunctions.Response.functions:type_name -> tfplugin5.GetFunctions.Response.FunctionsEntry + 4, // 75: tfplugin5.GetFunctions.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 10, // 76: tfplugin5.GetFunctions.Response.FunctionsEntry.value:type_name -> tfplugin5.Function + 3, // 77: tfplugin5.CallFunction.Request.arguments:type_name -> tfplugin5.DynamicValue + 3, // 78: tfplugin5.CallFunction.Response.result:type_name -> tfplugin5.DynamicValue + 4, // 79: tfplugin5.CallFunction.Response.diagnostics:type_name -> tfplugin5.Diagnostic + 37, // 80: tfplugin5.Provider.GetMetadata:input_type -> tfplugin5.GetMetadata.Request + 42, // 81: tfplugin5.Provider.GetSchema:input_type -> tfplugin5.GetProviderSchema.Request + 47, // 82: tfplugin5.Provider.PrepareProviderConfig:input_type -> tfplugin5.PrepareProviderConfig.Request + 51, // 83: tfplugin5.Provider.ValidateResourceTypeConfig:input_type -> tfplugin5.ValidateResourceTypeConfig.Request + 53, // 84: tfplugin5.Provider.ValidateDataSourceConfig:input_type -> tfplugin5.ValidateDataSourceConfig.Request + 49, // 85: tfplugin5.Provider.UpgradeResourceState:input_type -> tfplugin5.UpgradeResourceState.Request + 55, // 86: 
tfplugin5.Provider.Configure:input_type -> tfplugin5.Configure.Request + 57, // 87: tfplugin5.Provider.ReadResource:input_type -> tfplugin5.ReadResource.Request + 59, // 88: tfplugin5.Provider.PlanResourceChange:input_type -> tfplugin5.PlanResourceChange.Request + 61, // 89: tfplugin5.Provider.ApplyResourceChange:input_type -> tfplugin5.ApplyResourceChange.Request + 63, // 90: tfplugin5.Provider.ImportResourceState:input_type -> tfplugin5.ImportResourceState.Request + 66, // 91: tfplugin5.Provider.ReadDataSource:input_type -> tfplugin5.ReadDataSource.Request + 74, // 92: tfplugin5.Provider.GetFunctions:input_type -> tfplugin5.GetFunctions.Request + 77, // 93: tfplugin5.Provider.CallFunction:input_type -> tfplugin5.CallFunction.Request + 29, // 94: tfplugin5.Provider.Stop:input_type -> tfplugin5.Stop.Request + 68, // 95: tfplugin5.Provisioner.GetSchema:input_type -> tfplugin5.GetProvisionerSchema.Request + 70, // 96: tfplugin5.Provisioner.ValidateProvisionerConfig:input_type -> tfplugin5.ValidateProvisionerConfig.Request + 72, // 97: tfplugin5.Provisioner.ProvisionResource:input_type -> tfplugin5.ProvisionResource.Request + 29, // 98: tfplugin5.Provisioner.Stop:input_type -> tfplugin5.Stop.Request + 38, // 99: tfplugin5.Provider.GetMetadata:output_type -> tfplugin5.GetMetadata.Response + 43, // 100: tfplugin5.Provider.GetSchema:output_type -> tfplugin5.GetProviderSchema.Response + 48, // 101: tfplugin5.Provider.PrepareProviderConfig:output_type -> tfplugin5.PrepareProviderConfig.Response + 52, // 102: tfplugin5.Provider.ValidateResourceTypeConfig:output_type -> tfplugin5.ValidateResourceTypeConfig.Response + 54, // 103: tfplugin5.Provider.ValidateDataSourceConfig:output_type -> tfplugin5.ValidateDataSourceConfig.Response + 50, // 104: tfplugin5.Provider.UpgradeResourceState:output_type -> tfplugin5.UpgradeResourceState.Response + 56, // 105: tfplugin5.Provider.Configure:output_type -> tfplugin5.Configure.Response + 58, // 106: 
tfplugin5.Provider.ReadResource:output_type -> tfplugin5.ReadResource.Response + 60, // 107: tfplugin5.Provider.PlanResourceChange:output_type -> tfplugin5.PlanResourceChange.Response + 62, // 108: tfplugin5.Provider.ApplyResourceChange:output_type -> tfplugin5.ApplyResourceChange.Response + 65, // 109: tfplugin5.Provider.ImportResourceState:output_type -> tfplugin5.ImportResourceState.Response + 67, // 110: tfplugin5.Provider.ReadDataSource:output_type -> tfplugin5.ReadDataSource.Response + 75, // 111: tfplugin5.Provider.GetFunctions:output_type -> tfplugin5.GetFunctions.Response + 78, // 112: tfplugin5.Provider.CallFunction:output_type -> tfplugin5.CallFunction.Response + 30, // 113: tfplugin5.Provider.Stop:output_type -> tfplugin5.Stop.Response + 69, // 114: tfplugin5.Provisioner.GetSchema:output_type -> tfplugin5.GetProvisionerSchema.Response + 71, // 115: tfplugin5.Provisioner.ValidateProvisionerConfig:output_type -> tfplugin5.ValidateProvisionerConfig.Response + 73, // 116: tfplugin5.Provisioner.ProvisionResource:output_type -> tfplugin5.ProvisionResource.Response + 30, // 117: tfplugin5.Provisioner.Stop:output_type -> tfplugin5.Stop.Response + 99, // [99:118] is the sub-list for method output_type + 80, // [80:99] is the sub-list for method input_type + 80, // [80:80] is the sub-list for extension type_name + 80, // [80:80] is the sub-list for extension extendee + 0, // [0:80] is the sub-list for field type_name } func init() { file_tfplugin5_proto_init() } @@ -4392,7 +5137,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetMetadata); i { + switch v := v.(*Function); i { case 0: return &v.state case 1: @@ -4404,7 +5149,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetProviderSchema); i { + switch v := v.(*GetMetadata); i { case 0: return &v.state case 1: @@ 
-4416,7 +5161,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PrepareProviderConfig); i { + switch v := v.(*GetProviderSchema); i { case 0: return &v.state case 1: @@ -4428,7 +5173,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpgradeResourceState); i { + switch v := v.(*PrepareProviderConfig); i { case 0: return &v.state case 1: @@ -4440,7 +5185,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateResourceTypeConfig); i { + switch v := v.(*UpgradeResourceState); i { case 0: return &v.state case 1: @@ -4452,7 +5197,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateDataSourceConfig); i { + switch v := v.(*ValidateResourceTypeConfig); i { case 0: return &v.state case 1: @@ -4464,7 +5209,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Configure); i { + switch v := v.(*ValidateDataSourceConfig); i { case 0: return &v.state case 1: @@ -4476,7 +5221,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadResource); i { + switch v := v.(*Configure); i { case 0: return &v.state case 1: @@ -4488,7 +5233,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PlanResourceChange); i { + switch v := v.(*ReadResource); i { case 0: return &v.state case 1: @@ -4500,7 +5245,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[16].Exporter = func(v interface{}, i int) 
interface{} { - switch v := v.(*ApplyResourceChange); i { + switch v := v.(*PlanResourceChange); i { case 0: return &v.state case 1: @@ -4512,7 +5257,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ImportResourceState); i { + switch v := v.(*ApplyResourceChange); i { case 0: return &v.state case 1: @@ -4524,7 +5269,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadDataSource); i { + switch v := v.(*ImportResourceState); i { case 0: return &v.state case 1: @@ -4536,7 +5281,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetProvisionerSchema); i { + switch v := v.(*ReadDataSource); i { case 0: return &v.state case 1: @@ -4548,7 +5293,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateProvisionerConfig); i { + switch v := v.(*GetProvisionerSchema); i { case 0: return &v.state case 1: @@ -4560,7 +5305,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProvisionResource); i { + switch v := v.(*ValidateProvisionerConfig); i { case 0: return &v.state case 1: @@ -4572,7 +5317,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AttributePath_Step); i { + switch v := v.(*ProvisionResource); i { case 0: return &v.state case 1: @@ -4584,7 +5329,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Stop_Request); i { + switch v := v.(*GetFunctions); i { case 0: return &v.state case 1: @@ -4596,7 
+5341,19 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Stop_Response); i { + switch v := v.(*CallFunction); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AttributePath_Step); i { case 0: return &v.state case 1: @@ -4608,7 +5365,7 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Schema_Block); i { + switch v := v.(*Stop_Request); i { case 0: return &v.state case 1: @@ -4620,6 +5377,30 @@ func file_tfplugin5_proto_init() { } } file_tfplugin5_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Stop_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema_Block); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Schema_Attribute); i { case 0: return &v.state @@ -4631,7 +5412,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Schema_NestedBlock); i { case 0: return &v.state @@ -4643,7 +5424,31 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[32].Exporter = 
func(v interface{}, i int) interface{} { + switch v := v.(*Function_Parameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Function_Return); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetMetadata_Request); i { case 0: return &v.state @@ -4655,7 +5460,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetMetadata_Response); i { case 0: return &v.state @@ -4667,7 +5472,19 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata_FunctionMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetMetadata_DataSourceMetadata); i { case 0: return &v.state @@ -4679,7 +5496,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetMetadata_ResourceMetadata); i { case 0: return &v.state @@ -4691,7 +5508,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[33].Exporter = func(v interface{}, i 
int) interface{} { + file_tfplugin5_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetProviderSchema_Request); i { case 0: return &v.state @@ -4703,7 +5520,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetProviderSchema_Response); i { case 0: return &v.state @@ -4715,7 +5532,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PrepareProviderConfig_Request); i { case 0: return &v.state @@ -4727,7 +5544,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PrepareProviderConfig_Response); i { case 0: return &v.state @@ -4739,7 +5556,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UpgradeResourceState_Request); i { case 0: return &v.state @@ -4751,7 +5568,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UpgradeResourceState_Response); i { case 0: return &v.state @@ -4763,7 +5580,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + 
file_tfplugin5_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateResourceTypeConfig_Request); i { case 0: return &v.state @@ -4775,7 +5592,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateResourceTypeConfig_Response); i { case 0: return &v.state @@ -4787,7 +5604,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateDataSourceConfig_Request); i { case 0: return &v.state @@ -4799,7 +5616,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateDataSourceConfig_Response); i { case 0: return &v.state @@ -4811,7 +5628,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Configure_Request); i { case 0: return &v.state @@ -4823,7 +5640,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Configure_Response); i { case 0: return &v.state @@ -4835,7 +5652,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[54].Exporter = func(v 
interface{}, i int) interface{} { switch v := v.(*ReadResource_Request); i { case 0: return &v.state @@ -4847,7 +5664,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadResource_Response); i { case 0: return &v.state @@ -4859,7 +5676,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PlanResourceChange_Request); i { case 0: return &v.state @@ -4871,7 +5688,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PlanResourceChange_Response); i { case 0: return &v.state @@ -4883,7 +5700,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ApplyResourceChange_Request); i { case 0: return &v.state @@ -4895,7 +5712,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ApplyResourceChange_Response); i { case 0: return &v.state @@ -4907,7 +5724,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ImportResourceState_Request); 
i { case 0: return &v.state @@ -4919,7 +5736,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ImportResourceState_ImportedResource); i { case 0: return &v.state @@ -4931,7 +5748,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ImportResourceState_Response); i { case 0: return &v.state @@ -4943,7 +5760,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadDataSource_Request); i { case 0: return &v.state @@ -4955,7 +5772,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadDataSource_Response); i { case 0: return &v.state @@ -4967,7 +5784,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetProvisionerSchema_Request); i { case 0: return &v.state @@ -4979,7 +5796,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetProvisionerSchema_Response); i { case 0: return &v.state @@ -4991,7 +5808,7 @@ func 
file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateProvisionerConfig_Request); i { case 0: return &v.state @@ -5003,7 +5820,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateProvisionerConfig_Response); i { case 0: return &v.state @@ -5015,7 +5832,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ProvisionResource_Request); i { case 0: return &v.state @@ -5027,7 +5844,7 @@ func file_tfplugin5_proto_init() { return nil } } - file_tfplugin5_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin5_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ProvisionResource_Response); i { case 0: return &v.state @@ -5039,8 +5856,57 @@ func file_tfplugin5_proto_init() { return nil } } + file_tfplugin5_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFunctions_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFunctions_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CallFunction_Request); i { 
+ case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin5_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CallFunction_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } - file_tfplugin5_proto_msgTypes[22].OneofWrappers = []interface{}{ + file_tfplugin5_proto_msgTypes[1].OneofWrappers = []interface{}{} + file_tfplugin5_proto_msgTypes[25].OneofWrappers = []interface{}{ (*AttributePath_Step_AttributeName)(nil), (*AttributePath_Step_ElementKeyString)(nil), (*AttributePath_Step_ElementKeyInt)(nil), @@ -5051,7 +5917,7 @@ func file_tfplugin5_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_tfplugin5_proto_rawDesc, NumEnums: 3, - NumMessages: 64, + NumMessages: 76, NumExtensions: 0, NumServices: 2, }, diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.proto b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.proto index 639147f9550..d56a57e9fa6 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.proto +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5.proto @@ -1,9 +1,9 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -// Terraform Plugin RPC protocol version 5.4 +// Terraform Plugin RPC protocol version 5.5 // -// This file defines version 5.4 of the RPC protocol. To implement a plugin +// This file defines version 5.5 of the RPC protocol. To implement a plugin // against this protocol, copy this definition into your own codebase and // use protoc to generate stubs for your target language. 
// @@ -41,6 +41,10 @@ message Diagnostic { string summary = 2; string detail = 3; AttributePath attribute = 4; + + // function_argument is the positional function argument for aligning + // configuration source. + optional int64 function_argument = 5; } message AttributePath { @@ -144,6 +148,62 @@ message ServerCapabilities { bool get_provider_schema_optional = 2; } +message Function { + // parameters is the ordered list of positional function parameters. + repeated Parameter parameters = 1; + + // variadic_parameter is an optional final parameter which accepts + // zero or more argument values, in which Terraform will send an + // ordered list of the parameter type. + Parameter variadic_parameter = 2; + + // return is the function result. + Return return = 3; + + // summary is the human-readable shortened documentation for the function. + string summary = 4; + + // description is human-readable documentation for the function. + string description = 5; + + // description_kind is the formatting of the description. + StringKind description_kind = 6; + + // deprecation_message is human-readable documentation if the + // function is deprecated. + string deprecation_message = 7; + + message Parameter { + // name is the human-readable display name for the parameter. + string name = 1; + + // type is the type constraint for the parameter. + bytes type = 2; + + // allow_null_value when enabled denotes that a null argument value can + // be passed to the provider. When disabled, Terraform returns an error + // if the argument value is null. + bool allow_null_value = 3; + + // allow_unknown_values when enabled denotes that only wholly known + // argument values will be passed to the provider. When disabled, + // Terraform skips the function call entirely and assumes an unknown + // value result from the function. + bool allow_unknown_values = 4; + + // description is human-readable documentation for the parameter. 
+ string description = 5;
+
+ // description_kind is the formatting of the description.
+ StringKind description_kind = 6;
+ }
+
+ message Return {
+ // type is the type constraint for the function result.
+ bytes type = 1;
+ }
+}
+
 service Provider {
 //////// Information about what a provider supports/expects
@@ -173,6 +233,15 @@ service Provider {
 rpc ReadDataSource(ReadDataSource.Request) returns (ReadDataSource.Response);
+ // Functions
+
+ // GetFunctions returns the definitions of all functions.
+ rpc GetFunctions(GetFunctions.Request) returns (GetFunctions.Response);
+
+ // CallFunction runs the provider-defined function logic and returns
+ // the result with any diagnostics.
+ rpc CallFunction(CallFunction.Request) returns (CallFunction.Response);
+
 //////// Graceful Shutdown
 rpc Stop(Stop.Request) returns (Stop.Response);
 }
@@ -186,6 +255,14 @@ message GetMetadata {
 repeated Diagnostic diagnostics = 2;
 repeated DataSourceMetadata data_sources = 3;
 repeated ResourceMetadata resources = 4;
+
+ // functions returns metadata for any functions.
+ repeated FunctionMetadata functions = 5;
+ }
+
+ message FunctionMetadata {
+ // name is the function name.
+ string name = 1;
 }
 message DataSourceMetadata {
@@ -207,6 +284,9 @@ message GetProviderSchema {
 repeated Diagnostic diagnostics = 4;
 Schema provider_meta = 5;
 ServerCapabilities server_capabilities = 6;
+
+ // functions is a mapping of function names to definitions.
+ map<string, Function> functions = 7;
 }
 }
@@ -434,3 +514,33 @@ message ProvisionResource {
 repeated Diagnostic diagnostics = 2;
 }
 }
+
+message GetFunctions {
+ message Request {}
+
+ message Response {
+ // functions is a mapping of function names to definitions.
+ map<string, Function> functions = 1;
+
+ // diagnostics is any warnings or errors.
+ repeated Diagnostic diagnostics = 2;
+ }
+}
+
+message CallFunction {
+ message Request {
+ // name is the name of the function being called.
+ string name = 1;
+
+ // arguments is the data of each function argument value.
+ repeated DynamicValue arguments = 2; + } + + message Response { + // result is result value after running the function logic. + DynamicValue result = 1; + + // diagnostics is any warnings or errors from the function logic. + repeated Diagnostic diagnostics = 2; + } +} diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go index 0ed31f74882..188af414fca 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5/tfplugin5_grpc.pb.go @@ -1,9 +1,9 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -// Terraform Plugin RPC protocol version 5.4 +// Terraform Plugin RPC protocol version 5.5 // -// This file defines version 5.4 of the RPC protocol. To implement a plugin +// This file defines version 5.5 of the RPC protocol. To implement a plugin // against this protocol, copy this definition into your own codebase and // use protoc to generate stubs for your target language. 
// @@ -53,6 +53,8 @@ const ( Provider_ApplyResourceChange_FullMethodName = "/tfplugin5.Provider/ApplyResourceChange" Provider_ImportResourceState_FullMethodName = "/tfplugin5.Provider/ImportResourceState" Provider_ReadDataSource_FullMethodName = "/tfplugin5.Provider/ReadDataSource" + Provider_GetFunctions_FullMethodName = "/tfplugin5.Provider/GetFunctions" + Provider_CallFunction_FullMethodName = "/tfplugin5.Provider/CallFunction" Provider_Stop_FullMethodName = "/tfplugin5.Provider/Stop" ) @@ -81,6 +83,11 @@ type ProviderClient interface { ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) + // GetFunctions returns the definitions of all functions. + GetFunctions(ctx context.Context, in *GetFunctions_Request, opts ...grpc.CallOption) (*GetFunctions_Response, error) + // CallFunction runs the provider-defined function logic and returns + // the result with any diagnostics. + CallFunction(ctx context.Context, in *CallFunction_Request, opts ...grpc.CallOption) (*CallFunction_Response, error) // ////// Graceful Shutdown Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) } @@ -201,6 +208,24 @@ func (c *providerClient) ReadDataSource(ctx context.Context, in *ReadDataSource_ return out, nil } +func (c *providerClient) GetFunctions(ctx context.Context, in *GetFunctions_Request, opts ...grpc.CallOption) (*GetFunctions_Response, error) { + out := new(GetFunctions_Response) + err := c.cc.Invoke(ctx, Provider_GetFunctions_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) CallFunction(ctx context.Context, in *CallFunction_Request, opts ...grpc.CallOption) (*CallFunction_Response, error) { + out := new(CallFunction_Response) + err := c.cc.Invoke(ctx, Provider_CallFunction_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *providerClient) Stop(ctx context.Context, in *Stop_Request, opts ...grpc.CallOption) (*Stop_Response, error) { out := new(Stop_Response) err := c.cc.Invoke(ctx, Provider_Stop_FullMethodName, in, out, opts...) @@ -235,6 +260,11 @@ type ProviderServer interface { ApplyResourceChange(context.Context, *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) ImportResourceState(context.Context, *ImportResourceState_Request) (*ImportResourceState_Response, error) ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error) + // GetFunctions returns the definitions of all functions. + GetFunctions(context.Context, *GetFunctions_Request) (*GetFunctions_Response, error) + // CallFunction runs the provider-defined function logic and returns + // the result with any diagnostics. 
+ CallFunction(context.Context, *CallFunction_Request) (*CallFunction_Response, error) // ////// Graceful Shutdown Stop(context.Context, *Stop_Request) (*Stop_Response, error) mustEmbedUnimplementedProviderServer() @@ -280,6 +310,12 @@ func (UnimplementedProviderServer) ImportResourceState(context.Context, *ImportR func (UnimplementedProviderServer) ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error) { return nil, status.Errorf(codes.Unimplemented, "method ReadDataSource not implemented") } +func (UnimplementedProviderServer) GetFunctions(context.Context, *GetFunctions_Request) (*GetFunctions_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetFunctions not implemented") +} +func (UnimplementedProviderServer) CallFunction(context.Context, *CallFunction_Request) (*CallFunction_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method CallFunction not implemented") +} func (UnimplementedProviderServer) Stop(context.Context, *Stop_Request) (*Stop_Response, error) { return nil, status.Errorf(codes.Unimplemented, "method Stop not implemented") } @@ -512,6 +548,42 @@ func _Provider_ReadDataSource_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _Provider_GetFunctions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetFunctions_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).GetFunctions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_GetFunctions_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).GetFunctions(ctx, req.(*GetFunctions_Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _Provider_CallFunction_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CallFunction_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).CallFunction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_CallFunction_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).CallFunction(ctx, req.(*CallFunction_Request)) + } + return interceptor(ctx, in, info, handler) +} + func _Provider_Stop_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Stop_Request) if err := dec(in); err != nil { @@ -585,6 +657,14 @@ var Provider_ServiceDesc = grpc.ServiceDesc{ MethodName: "ReadDataSource", Handler: _Provider_ReadDataSource_Handler, }, + { + MethodName: "GetFunctions", + Handler: _Provider_GetFunctions_Handler, + }, + { + MethodName: "CallFunction", + Handler: _Provider_CallFunction_Handler, + }, { MethodName: "Stop", Handler: _Provider_Stop_Handler, diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/diagnostic.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/diagnostic.go index 81d692cef91..6f3029e5f77 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/diagnostic.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/diagnostic.go @@ -9,9 +9,10 @@ import ( func Diagnostic(in *tfprotov5.Diagnostic) (*tfplugin5.Diagnostic, error) { diag := &tfplugin5.Diagnostic{ - Severity: Diagnostic_Severity(in.Severity), - Summary: forceValidUTF8(in.Summary), - Detail: forceValidUTF8(in.Detail), + Severity: Diagnostic_Severity(in.Severity), + Summary: forceValidUTF8(in.Summary), 
+ Detail: forceValidUTF8(in.Detail), + FunctionArgument: in.FunctionArgument, } if in.Attribute != nil { attr, err := AttributePath(in.Attribute) diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/function.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/function.go new file mode 100644 index 00000000000..c8dc4d9a671 --- /dev/null +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/function.go @@ -0,0 +1,174 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tfprotov5" + "github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/tfplugin5" +) + +func CallFunction_Response(in *tfprotov5.CallFunctionResponse) (*tfplugin5.CallFunction_Response, error) { + if in == nil { + return nil, nil + } + + diags, err := Diagnostics(in.Diagnostics) + + if err != nil { + return nil, err + } + + resp := &tfplugin5.CallFunction_Response{ + Diagnostics: diags, + } + + if in.Result != nil { + resp.Result = DynamicValue(in.Result) + } + + return resp, nil +} + +func Function(in *tfprotov5.Function) (*tfplugin5.Function, error) { + if in == nil { + return nil, nil + } + + resp := &tfplugin5.Function{ + Description: in.Description, + DescriptionKind: StringKind(in.DescriptionKind), + DeprecationMessage: in.DeprecationMessage, + Parameters: make([]*tfplugin5.Function_Parameter, 0, len(in.Parameters)), + Summary: in.Summary, + } + + for position, parameter := range in.Parameters { + if parameter == nil { + return nil, fmt.Errorf("missing function parameter definition at position: %d", position) + } + + functionParameter, err := Function_Parameter(parameter) + + if err != nil { + return nil, fmt.Errorf("unable to marshal function parameter at position %d: %w", position, err) + } + + resp.Parameters = append(resp.Parameters, 
functionParameter) + } + + if in.Return == nil { + return nil, fmt.Errorf("missing function return definition") + } + + functionReturn, err := Function_Return(in.Return) + + if err != nil { + return nil, fmt.Errorf("unable to marshal function return: %w", err) + } + + resp.Return = functionReturn + + if in.VariadicParameter != nil { + variadicParameter, err := Function_Parameter(in.VariadicParameter) + + if err != nil { + return nil, fmt.Errorf("unable to marshal variadic function parameter: %w", err) + } + + resp.VariadicParameter = variadicParameter + } + + return resp, nil +} + +func Function_Parameter(in *tfprotov5.FunctionParameter) (*tfplugin5.Function_Parameter, error) { + if in == nil { + return nil, nil + } + + resp := &tfplugin5.Function_Parameter{ + AllowNullValue: in.AllowNullValue, + AllowUnknownValues: in.AllowUnknownValues, + Description: in.Description, + DescriptionKind: StringKind(in.DescriptionKind), + Name: in.Name, + } + + if in.Type == nil { + return nil, fmt.Errorf("missing function parameter type definition") + } + + ctyType, err := CtyType(in.Type) + + if err != nil { + return resp, fmt.Errorf("error marshaling function parameter type: %w", err) + } + + resp.Type = ctyType + + return resp, nil +} + +func Function_Return(in *tfprotov5.FunctionReturn) (*tfplugin5.Function_Return, error) { + if in == nil { + return nil, nil + } + + resp := &tfplugin5.Function_Return{} + + if in.Type == nil { + return nil, fmt.Errorf("missing function return type definition") + } + + ctyType, err := CtyType(in.Type) + + if err != nil { + return resp, fmt.Errorf("error marshaling function return type: %w", err) + } + + resp.Type = ctyType + + return resp, nil +} + +func GetFunctions_Response(in *tfprotov5.GetFunctionsResponse) (*tfplugin5.GetFunctions_Response, error) { + if in == nil { + return nil, nil + } + + diags, err := Diagnostics(in.Diagnostics) + + if err != nil { + return nil, err + } + + resp := &tfplugin5.GetFunctions_Response{ + Diagnostics: diags, 
+ Functions: make(map[string]*tfplugin5.Function, len(in.Functions)), + } + + for name, functionPtr := range in.Functions { + function, err := Function(functionPtr) + + if err != nil { + return nil, fmt.Errorf("error marshaling function definition for %q: %w", name, err) + } + + resp.Functions[name] = function + } + + return resp, nil +} + +func GetMetadata_FunctionMetadata(in *tfprotov5.FunctionMetadata) *tfplugin5.GetMetadata_FunctionMetadata { + if in == nil { + return nil + } + + return &tfplugin5.GetMetadata_FunctionMetadata{ + Name: in.Name, + } +} diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/provider.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/provider.go index 6e29c1dd5f1..e600925394a 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/provider.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/internal/toproto/provider.go @@ -18,6 +18,7 @@ func GetMetadata_Response(in *tfprotov5.GetMetadataResponse) (*tfplugin5.GetMeta resp := &tfplugin5.GetMetadata_Response{ DataSources: make([]*tfplugin5.GetMetadata_DataSourceMetadata, 0, len(in.DataSources)), + Functions: make([]*tfplugin5.GetMetadata_FunctionMetadata, 0, len(in.Functions)), Resources: make([]*tfplugin5.GetMetadata_ResourceMetadata, 0, len(in.Resources)), ServerCapabilities: ServerCapabilities(in.ServerCapabilities), } @@ -26,6 +27,10 @@ func GetMetadata_Response(in *tfprotov5.GetMetadataResponse) (*tfplugin5.GetMeta resp.DataSources = append(resp.DataSources, GetMetadata_DataSourceMetadata(&datasource)) } + for _, function := range in.Functions { + resp.Functions = append(resp.Functions, GetMetadata_FunctionMetadata(&function)) + } + for _, resource := range in.Resources { resp.Resources = append(resp.Resources, GetMetadata_ResourceMetadata(&resource)) } @@ -50,6 +55,9 @@ func GetProviderSchema_Response(in 
*tfprotov5.GetProviderSchemaResponse) (*tfplu return nil, nil } resp := tfplugin5.GetProviderSchema_Response{ + DataSourceSchemas: make(map[string]*tfplugin5.Schema, len(in.DataSourceSchemas)), + Functions: make(map[string]*tfplugin5.Function, len(in.Functions)), + ResourceSchemas: make(map[string]*tfplugin5.Schema, len(in.ResourceSchemas)), ServerCapabilities: ServerCapabilities(in.ServerCapabilities), } if in.Provider != nil { @@ -66,7 +74,7 @@ func GetProviderSchema_Response(in *tfprotov5.GetProviderSchemaResponse) (*tfplu } resp.ProviderMeta = schema } - resp.ResourceSchemas = make(map[string]*tfplugin5.Schema, len(in.ResourceSchemas)) + for k, v := range in.ResourceSchemas { if v == nil { resp.ResourceSchemas[k] = nil @@ -78,7 +86,7 @@ func GetProviderSchema_Response(in *tfprotov5.GetProviderSchemaResponse) (*tfplu } resp.ResourceSchemas[k] = schema } - resp.DataSourceSchemas = make(map[string]*tfplugin5.Schema, len(in.DataSourceSchemas)) + for k, v := range in.DataSourceSchemas { if v == nil { resp.DataSourceSchemas[k] = nil @@ -90,6 +98,22 @@ func GetProviderSchema_Response(in *tfprotov5.GetProviderSchemaResponse) (*tfplu } resp.DataSourceSchemas[k] = schema } + + for name, functionPtr := range in.Functions { + if functionPtr == nil { + resp.Functions[name] = nil + continue + } + + function, err := Function(functionPtr) + + if err != nil { + return &resp, fmt.Errorf("error marshaling function definition for %q: %w", name, err) + } + + resp.Functions[name] = function + } + diags, err := Diagnostics(in.Diagnostics) if err != nil { return &resp, err diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/provider.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/provider.go index 20b7e44e5c2..fa85a8e04fc 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/provider.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/provider.go @@ 
-47,6 +47,16 @@ type ProviderServer interface { // data source is to terraform-plugin-go, so they're their own // interface that is composed into ProviderServer. DataSourceServer + + // FunctionServer is an interface encapsulating all the function-related RPC + // requests. ProviderServer implementations must implement them, but they + // are a handy interface for defining what a function is to + // terraform-plugin-go, so they are their own interface that is composed + // into ProviderServer. + // + // This will be required in an upcoming release. + // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 + // FunctionServer } // GetMetadataRequest represents a GetMetadata RPC request. @@ -66,6 +76,9 @@ type GetMetadataResponse struct { // DataSources returns metadata for all data resources. DataSources []DataSourceMetadata + // Functions returns metadata for all functions. + Functions []FunctionMetadata + // Resources returns metadata for all managed resources. Resources []ResourceMetadata } @@ -106,6 +119,14 @@ type GetProviderSchemaResponse struct { // `data` in a user's configuration. DataSourceSchemas map[string]*Schema + // Functions is a map of function names to their definition. + // + // Unlike data resources and managed resources, the name should NOT be + // prefixed with the provider name and an underscore. Configuration + // references to functions use a separate namespacing syntax that already + // includes the provider name. + Functions map[string]*Function + // Diagnostics report errors or warnings related to returning the // provider's schemas. Returning an empty slice indicates success, with // no errors or warnings generated. 
diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/server.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/server.go index c9fcdc75503..d7adf6c49c0 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/server.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server/server.go @@ -903,3 +903,123 @@ func (s *server) ImportResourceState(ctx context.Context, req *tfplugin5.ImportR } return ret, nil } + +func (s *server) CallFunction(ctx context.Context, protoReq *tfplugin5.CallFunction_Request) (*tfplugin5.CallFunction_Response, error) { + rpc := "CallFunction" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + // Remove this check and error in preference of s.downstream.CallFunction + // below once ProviderServer interface requires FunctionServer. + // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 + functionServer, ok := s.downstream.(tfprotov5.FunctionServer) + + if !ok { + logging.ProtocolError(ctx, "ProviderServer does not implement FunctionServer") + + protoResp := &tfplugin5.CallFunction_Response{ + Diagnostics: []*tfplugin5.Diagnostic{ + { + Severity: tfplugin5.Diagnostic_ERROR, + Summary: "Provider Functions Not Implemented", + Detail: "A provider-defined function call was received by the provider, however the provider does not implement functions. 
" + + "Either upgrade the provider to a version that implements provider-defined functions or this is a bug in Terraform that should be reported to the Terraform maintainers.", + }, + }, + } + + return protoResp, nil + } + + req, err := fromproto.CallFunctionRequest(protoReq) + + if err != nil { + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]any{logging.KeyError: err}) + + return nil, err + } + + for position, argument := range req.Arguments { + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", fmt.Sprintf("Arguments_%d", position), argument) + } + + ctx = tf5serverlogging.DownstreamRequest(ctx) + + // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 + // resp, err := s.downstream.CallFunction(ctx, req) + resp, err := functionServer.CallFunction(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]any{logging.KeyError: err}) + return nil, err + } + + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "Result", resp.Result) + + protoResp, err := toproto.CallFunction_Response(resp) + + if err != nil { + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]any{logging.KeyError: err}) + return nil, err + } + + return protoResp, nil +} + +func (s *server) GetFunctions(ctx context.Context, protoReq *tfplugin5.GetFunctions_Request) (*tfplugin5.GetFunctions_Response, error) { + rpc := "GetFunctions" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + // Remove this check and response in preference of s.downstream.GetFunctions + // below once ProviderServer interface requires FunctionServer. 
+ // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 + functionServer, ok := s.downstream.(tfprotov5.FunctionServer) + + if !ok { + logging.ProtocolWarn(ctx, "ProviderServer does not implement FunctionServer") + + protoResp := &tfplugin5.GetFunctions_Response{ + Functions: map[string]*tfplugin5.Function{}, + } + + return protoResp, nil + } + + req, err := fromproto.GetFunctionsRequest(protoReq) + + if err != nil { + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]any{logging.KeyError: err}) + + return nil, err + } + + ctx = tf5serverlogging.DownstreamRequest(ctx) + + // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 + // resp, err := s.downstream.GetFunctions(ctx, req) + resp, err := functionServer.GetFunctions(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]any{logging.KeyError: err}) + return nil, err + } + + tf5serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + + protoResp, err := toproto.GetFunctions_Response(resp) + + if err != nil { + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]any{logging.KeyError: err}) + return nil, err + } + + return protoResp, nil +} diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/diagnostic.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/diagnostic.go index 8f856abbba7..2fe46f327e7 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/diagnostic.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/diagnostic.go @@ -42,6 +42,10 @@ type Diagnostic struct { // indicate that the problem is with a certain field in the resource, // which helps users find the source of the problem. Attribute *tftypes.AttributePath + + // FunctionArgument is the positional function argument for aligning + // configuration source. 
+ FunctionArgument *int64 } // DiagnosticSeverity represents different classes of Diagnostic which affect diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/function.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/function.go new file mode 100644 index 00000000000..613d67c2e93 --- /dev/null +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/function.go @@ -0,0 +1,141 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package tfprotov6 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +// Function describes the definition of a function. Result must be defined. +type Function struct { + // Parameters is the ordered list of positional function parameters. + Parameters []*FunctionParameter + + // VariadicParameter is an optional final parameter which accepts zero or + // more argument values, in which Terraform will send an ordered list of the + // parameter type. + VariadicParameter *FunctionParameter + + // Return is the function result. + Return *FunctionReturn + + // Summary is the shortened human-readable documentation for the function. + Summary string + + // Description is the longer human-readable documentation for the function. + Description string + + // DescriptionKind indicates the formatting and encoding that the + // Description field is using. + DescriptionKind StringKind + + // DeprecationMessage is the human-readable documentation if the function + // is deprecated. This message should be practitioner oriented to explain + // how their configuration should be updated. + DeprecationMessage string +} + +// FunctionMetadata describes metadata for a function in the GetMetadata RPC. +type FunctionMetadata struct { + // Name is the name of the function. + Name string +} + +// FunctionParameter describes the definition of a function parameter. Type must +// be defined. 
+type FunctionParameter struct { + // AllowNullValue when enabled denotes that a null argument value can be + // passed to the provider. When disabled, Terraform returns an error if the + // argument value is null. + AllowNullValue bool + + // AllowUnknownValues when enabled denotes that any unknown argument value + // (recursively checked for collections) can be passed to the provider. When + // disabled and an unknown value is present, Terraform skips the function + // call entirely and returns an unknown value result from the function. + AllowUnknownValues bool + + // Description is the human-readable documentation for the parameter. + Description string + + // DescriptionKind indicates the formatting and encoding that the + // Description field is using. + DescriptionKind StringKind + + // Name is the human-readable display name for the parameter. Parameters + // are by definition positional and this name is only used in documentation. + Name string + + // Type indicates the type of data the parameter expects. + Type tftypes.Type +} + +// FunctionReturn describes the definition of a function result. Type must be +// defined. +type FunctionReturn struct { + // Type indicates the type of return data. + Type tftypes.Type +} + +// FunctionServer is an interface containing the methods a function +// implementation needs to fill. +type FunctionServer interface { + // CallFunction is called when Terraform wants to execute the logic of a + // function referenced in the configuration. + CallFunction(context.Context, *CallFunctionRequest) (*CallFunctionResponse, error) + + // GetFunctions is called when Terraform wants to lookup which functions a + // provider supports when not calling GetProviderSchema. + GetFunctions(context.Context, *GetFunctionsRequest) (*GetFunctionsResponse, error) +} + +// CallFunctionRequest is the request Terraform sends when it wants to execute +// the logic of function referenced in the configuration. 
+type CallFunctionRequest struct { + // Name is the function name being called. + Name string + + // Arguments is the configuration value of each argument the practitioner + // supplied for the function call. The ordering and value of each element + // matches the function parameters and their associated type. If the + // function definition includes a final variadic parameter, its value is an + // ordered list of the variadic parameter type. + Arguments []*DynamicValue +} + +// CallFunctionResponse is the response from the provider with the result of +// executing the logic of the function. +type CallFunctionResponse struct { + // Diagnostics report errors or warnings related to the execution of the + // function logic. Returning an empty slice indicates a successful response + // with no warnings or errors presented to practitioners. + Diagnostics []*Diagnostic + + // Result is the return value from the called function, matching the result + // type in the function definition. + Result *DynamicValue +} + +// GetFunctionsRequest is the request Terraform sends when it wants to lookup +// which functions a provider supports when not calling GetProviderSchema. +type GetFunctionsRequest struct{} + +// GetFunctionsResponse is the response from the provider about the implemented +// functions. +type GetFunctionsResponse struct { + // Diagnostics report errors or warnings related to the provider + // implementation. Returning an empty slice indicates a successful response + // with no warnings or errors presented to practitioners. + Diagnostics []*Diagnostic + + // Functions is a map of function names to their definition. + // + // Unlike data resources and managed resources, the name should NOT be + // prefixed with the provider name and an underscore. Configuration + // references to functions use a separate namespacing syntax that already + // includes the provider name. 
+ Functions map[string]*Function +} diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag/diagnostics.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag/diagnostics.go index 29bed4540c4..18945fb55b2 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag/diagnostics.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/diag/diagnostics.go @@ -53,6 +53,10 @@ func (d Diagnostics) Log(ctx context.Context) { diagnosticFields[logging.KeyDiagnosticAttribute] = diagnostic.Attribute.String() } + if diagnostic.FunctionArgument != nil { + diagnosticFields[logging.KeyDiagnosticFunctionArgument] = *diagnostic.FunctionArgument + } + switch diagnostic.Severity { case tfprotov6.DiagnosticSeverityError: logging.ProtocolError(ctx, "Response contains error diagnostic", diagnosticFields) diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/function.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/function.go new file mode 100644 index 00000000000..1e7f00328a5 --- /dev/null +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/fromproto/function.go @@ -0,0 +1,36 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: MPL-2.0 + +package fromproto + +import ( + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" +) + +func CallFunctionRequest(in *tfplugin6.CallFunction_Request) (*tfprotov6.CallFunctionRequest, error) { + if in == nil { + return nil, nil + } + + resp := &tfprotov6.CallFunctionRequest{ + Arguments: make([]*tfprotov6.DynamicValue, 0, len(in.Arguments)), + Name: in.Name, + } + + for _, argument := range in.Arguments { + resp.Arguments = append(resp.Arguments, DynamicValue(argument)) + } + + return resp, nil +} + +func GetFunctionsRequest(in *tfplugin6.GetFunctions_Request) (*tfprotov6.GetFunctionsRequest, error) { + if in == nil { + return nil, nil + } + + resp := &tfprotov6.GetFunctionsRequest{} + + return resp, nil +} diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.pb.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.pb.go index 1e2cd2366a5..46ecfc74d4f 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.pb.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.pb.go @@ -1,9 +1,9 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -// Terraform Plugin RPC protocol version 6.4 +// Terraform Plugin RPC protocol version 6.5 // -// This file defines version 6.4 of the RPC protocol. To implement a plugin +// This file defines version 6.5 of the RPC protocol. To implement a plugin // against this protocol, copy this definition into your own codebase and // use protoc to generate stubs for your target language. 
// @@ -316,6 +316,9 @@ type Diagnostic struct { Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` Detail string `protobuf:"bytes,3,opt,name=detail,proto3" json:"detail,omitempty"` Attribute *AttributePath `protobuf:"bytes,4,opt,name=attribute,proto3" json:"attribute,omitempty"` + // function_argument is the positional function argument for aligning + // configuration source. + FunctionArgument *int64 `protobuf:"varint,5,opt,name=function_argument,json=functionArgument,proto3,oneof" json:"function_argument,omitempty"` } func (x *Diagnostic) Reset() { @@ -378,6 +381,13 @@ func (x *Diagnostic) GetAttribute() *AttributePath { return nil } +func (x *Diagnostic) GetFunctionArgument() int64 { + if x != nil && x.FunctionArgument != nil { + return *x.FunctionArgument + } + return 0 +} + type AttributePath struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -581,6 +591,111 @@ func (x *Schema) GetBlock() *Schema_Block { return nil } +type Function struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // parameters is the ordered list of positional function parameters. + Parameters []*Function_Parameter `protobuf:"bytes,1,rep,name=parameters,proto3" json:"parameters,omitempty"` + // variadic_parameter is an optional final parameter which accepts + // zero or more argument values, in which Terraform will send an + // ordered list of the parameter type. + VariadicParameter *Function_Parameter `protobuf:"bytes,2,opt,name=variadic_parameter,json=variadicParameter,proto3" json:"variadic_parameter,omitempty"` + // return is the function result. + Return *Function_Return `protobuf:"bytes,3,opt,name=return,proto3" json:"return,omitempty"` + // summary is the human-readable shortened documentation for the function. + Summary string `protobuf:"bytes,4,opt,name=summary,proto3" json:"summary,omitempty"` + // description is human-readable documentation for the function. 
+ Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // description_kind is the formatting of the description. + DescriptionKind StringKind `protobuf:"varint,6,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin6.StringKind" json:"description_kind,omitempty"` + // deprecation_message is human-readable documentation if the + // function is deprecated. + DeprecationMessage string `protobuf:"bytes,7,opt,name=deprecation_message,json=deprecationMessage,proto3" json:"deprecation_message,omitempty"` +} + +func (x *Function) Reset() { + *x = Function{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Function) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Function) ProtoMessage() {} + +func (x *Function) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Function.ProtoReflect.Descriptor instead. 
+func (*Function) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{6} +} + +func (x *Function) GetParameters() []*Function_Parameter { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *Function) GetVariadicParameter() *Function_Parameter { + if x != nil { + return x.VariadicParameter + } + return nil +} + +func (x *Function) GetReturn() *Function_Return { + if x != nil { + return x.Return + } + return nil +} + +func (x *Function) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *Function) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Function) GetDescriptionKind() StringKind { + if x != nil { + return x.DescriptionKind + } + return StringKind_PLAIN +} + +func (x *Function) GetDeprecationMessage() string { + if x != nil { + return x.DeprecationMessage + } + return "" +} + // ServerCapabilities allows providers to communicate extra information // regarding supported protocol features. This is used to indicate // availability of certain forward-compatible changes which may be optional @@ -603,7 +718,7 @@ type ServerCapabilities struct { func (x *ServerCapabilities) Reset() { *x = ServerCapabilities{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[6] + mi := &file_tfplugin6_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -616,7 +731,7 @@ func (x *ServerCapabilities) String() string { func (*ServerCapabilities) ProtoMessage() {} func (x *ServerCapabilities) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[6] + mi := &file_tfplugin6_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -629,7 +744,7 @@ func (x *ServerCapabilities) ProtoReflect() protoreflect.Message { // Deprecated: Use ServerCapabilities.ProtoReflect.Descriptor instead. 
func (*ServerCapabilities) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{6} + return file_tfplugin6_proto_rawDescGZIP(), []int{7} } func (x *ServerCapabilities) GetPlanDestroy() bool { @@ -655,7 +770,7 @@ type GetMetadata struct { func (x *GetMetadata) Reset() { *x = GetMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[7] + mi := &file_tfplugin6_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -668,7 +783,7 @@ func (x *GetMetadata) String() string { func (*GetMetadata) ProtoMessage() {} func (x *GetMetadata) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[7] + mi := &file_tfplugin6_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -681,7 +796,7 @@ func (x *GetMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMetadata.ProtoReflect.Descriptor instead. 
func (*GetMetadata) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{7} + return file_tfplugin6_proto_rawDescGZIP(), []int{8} } type GetProviderSchema struct { @@ -693,7 +808,7 @@ type GetProviderSchema struct { func (x *GetProviderSchema) Reset() { *x = GetProviderSchema{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[8] + mi := &file_tfplugin6_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -706,7 +821,7 @@ func (x *GetProviderSchema) String() string { func (*GetProviderSchema) ProtoMessage() {} func (x *GetProviderSchema) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[8] + mi := &file_tfplugin6_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -719,7 +834,7 @@ func (x *GetProviderSchema) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProviderSchema.ProtoReflect.Descriptor instead. 
func (*GetProviderSchema) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{8} + return file_tfplugin6_proto_rawDescGZIP(), []int{9} } type ValidateProviderConfig struct { @@ -731,7 +846,7 @@ type ValidateProviderConfig struct { func (x *ValidateProviderConfig) Reset() { *x = ValidateProviderConfig{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[9] + mi := &file_tfplugin6_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -744,7 +859,7 @@ func (x *ValidateProviderConfig) String() string { func (*ValidateProviderConfig) ProtoMessage() {} func (x *ValidateProviderConfig) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[9] + mi := &file_tfplugin6_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -757,7 +872,7 @@ func (x *ValidateProviderConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateProviderConfig.ProtoReflect.Descriptor instead. 
func (*ValidateProviderConfig) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{9} + return file_tfplugin6_proto_rawDescGZIP(), []int{10} } type UpgradeResourceState struct { @@ -769,7 +884,7 @@ type UpgradeResourceState struct { func (x *UpgradeResourceState) Reset() { *x = UpgradeResourceState{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[10] + mi := &file_tfplugin6_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -782,7 +897,7 @@ func (x *UpgradeResourceState) String() string { func (*UpgradeResourceState) ProtoMessage() {} func (x *UpgradeResourceState) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[10] + mi := &file_tfplugin6_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -795,7 +910,7 @@ func (x *UpgradeResourceState) ProtoReflect() protoreflect.Message { // Deprecated: Use UpgradeResourceState.ProtoReflect.Descriptor instead. 
func (*UpgradeResourceState) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{10} + return file_tfplugin6_proto_rawDescGZIP(), []int{11} } type ValidateResourceConfig struct { @@ -807,7 +922,7 @@ type ValidateResourceConfig struct { func (x *ValidateResourceConfig) Reset() { *x = ValidateResourceConfig{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[11] + mi := &file_tfplugin6_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -820,7 +935,7 @@ func (x *ValidateResourceConfig) String() string { func (*ValidateResourceConfig) ProtoMessage() {} func (x *ValidateResourceConfig) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[11] + mi := &file_tfplugin6_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -833,7 +948,7 @@ func (x *ValidateResourceConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateResourceConfig.ProtoReflect.Descriptor instead. 
func (*ValidateResourceConfig) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{11} + return file_tfplugin6_proto_rawDescGZIP(), []int{12} } type ValidateDataResourceConfig struct { @@ -845,7 +960,7 @@ type ValidateDataResourceConfig struct { func (x *ValidateDataResourceConfig) Reset() { *x = ValidateDataResourceConfig{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[12] + mi := &file_tfplugin6_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -858,7 +973,7 @@ func (x *ValidateDataResourceConfig) String() string { func (*ValidateDataResourceConfig) ProtoMessage() {} func (x *ValidateDataResourceConfig) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[12] + mi := &file_tfplugin6_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -871,7 +986,7 @@ func (x *ValidateDataResourceConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateDataResourceConfig.ProtoReflect.Descriptor instead. 
func (*ValidateDataResourceConfig) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{12} + return file_tfplugin6_proto_rawDescGZIP(), []int{13} } type ConfigureProvider struct { @@ -883,7 +998,7 @@ type ConfigureProvider struct { func (x *ConfigureProvider) Reset() { *x = ConfigureProvider{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[13] + mi := &file_tfplugin6_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -896,7 +1011,7 @@ func (x *ConfigureProvider) String() string { func (*ConfigureProvider) ProtoMessage() {} func (x *ConfigureProvider) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[13] + mi := &file_tfplugin6_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -909,7 +1024,7 @@ func (x *ConfigureProvider) ProtoReflect() protoreflect.Message { // Deprecated: Use ConfigureProvider.ProtoReflect.Descriptor instead. 
func (*ConfigureProvider) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{13} + return file_tfplugin6_proto_rawDescGZIP(), []int{14} } type ReadResource struct { @@ -921,7 +1036,7 @@ type ReadResource struct { func (x *ReadResource) Reset() { *x = ReadResource{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[14] + mi := &file_tfplugin6_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -934,7 +1049,7 @@ func (x *ReadResource) String() string { func (*ReadResource) ProtoMessage() {} func (x *ReadResource) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[14] + mi := &file_tfplugin6_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -947,7 +1062,7 @@ func (x *ReadResource) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadResource.ProtoReflect.Descriptor instead. 
func (*ReadResource) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{14} + return file_tfplugin6_proto_rawDescGZIP(), []int{15} } type PlanResourceChange struct { @@ -959,7 +1074,7 @@ type PlanResourceChange struct { func (x *PlanResourceChange) Reset() { *x = PlanResourceChange{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[15] + mi := &file_tfplugin6_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -972,7 +1087,7 @@ func (x *PlanResourceChange) String() string { func (*PlanResourceChange) ProtoMessage() {} func (x *PlanResourceChange) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[15] + mi := &file_tfplugin6_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -985,7 +1100,7 @@ func (x *PlanResourceChange) ProtoReflect() protoreflect.Message { // Deprecated: Use PlanResourceChange.ProtoReflect.Descriptor instead. 
func (*PlanResourceChange) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{15} + return file_tfplugin6_proto_rawDescGZIP(), []int{16} } type ApplyResourceChange struct { @@ -997,7 +1112,7 @@ type ApplyResourceChange struct { func (x *ApplyResourceChange) Reset() { *x = ApplyResourceChange{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[16] + mi := &file_tfplugin6_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1010,7 +1125,7 @@ func (x *ApplyResourceChange) String() string { func (*ApplyResourceChange) ProtoMessage() {} func (x *ApplyResourceChange) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[16] + mi := &file_tfplugin6_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1023,7 +1138,7 @@ func (x *ApplyResourceChange) ProtoReflect() protoreflect.Message { // Deprecated: Use ApplyResourceChange.ProtoReflect.Descriptor instead. 
func (*ApplyResourceChange) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{16} + return file_tfplugin6_proto_rawDescGZIP(), []int{17} } type ImportResourceState struct { @@ -1035,7 +1150,7 @@ type ImportResourceState struct { func (x *ImportResourceState) Reset() { *x = ImportResourceState{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[17] + mi := &file_tfplugin6_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1048,7 +1163,7 @@ func (x *ImportResourceState) String() string { func (*ImportResourceState) ProtoMessage() {} func (x *ImportResourceState) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[17] + mi := &file_tfplugin6_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1061,7 +1176,7 @@ func (x *ImportResourceState) ProtoReflect() protoreflect.Message { // Deprecated: Use ImportResourceState.ProtoReflect.Descriptor instead. 
func (*ImportResourceState) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{17} + return file_tfplugin6_proto_rawDescGZIP(), []int{18} } type ReadDataSource struct { @@ -1073,7 +1188,7 @@ type ReadDataSource struct { func (x *ReadDataSource) Reset() { *x = ReadDataSource{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[18] + mi := &file_tfplugin6_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1086,7 +1201,7 @@ func (x *ReadDataSource) String() string { func (*ReadDataSource) ProtoMessage() {} func (x *ReadDataSource) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[18] + mi := &file_tfplugin6_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1099,7 +1214,83 @@ func (x *ReadDataSource) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadDataSource.ProtoReflect.Descriptor instead. 
func (*ReadDataSource) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{18} + return file_tfplugin6_proto_rawDescGZIP(), []int{19} +} + +type GetFunctions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetFunctions) Reset() { + *x = GetFunctions{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFunctions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFunctions) ProtoMessage() {} + +func (x *GetFunctions) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFunctions.ProtoReflect.Descriptor instead. 
+func (*GetFunctions) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{20} +} + +type CallFunction struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CallFunction) Reset() { + *x = CallFunction{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CallFunction) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CallFunction) ProtoMessage() {} + +func (x *CallFunction) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CallFunction.ProtoReflect.Descriptor instead. +func (*CallFunction) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{21} } type AttributePath_Step struct { @@ -1118,7 +1309,7 @@ type AttributePath_Step struct { func (x *AttributePath_Step) Reset() { *x = AttributePath_Step{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[19] + mi := &file_tfplugin6_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1131,7 +1322,7 @@ func (x *AttributePath_Step) String() string { func (*AttributePath_Step) ProtoMessage() {} func (x *AttributePath_Step) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[19] + mi := &file_tfplugin6_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1210,7 +1401,7 @@ type StopProvider_Request struct { func (x *StopProvider_Request) Reset() { *x = StopProvider_Request{} if protoimpl.UnsafeEnabled { - mi := 
&file_tfplugin6_proto_msgTypes[20] + mi := &file_tfplugin6_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1223,7 +1414,7 @@ func (x *StopProvider_Request) String() string { func (*StopProvider_Request) ProtoMessage() {} func (x *StopProvider_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[20] + mi := &file_tfplugin6_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1250,7 +1441,7 @@ type StopProvider_Response struct { func (x *StopProvider_Response) Reset() { *x = StopProvider_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[21] + mi := &file_tfplugin6_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1263,7 +1454,7 @@ func (x *StopProvider_Response) String() string { func (*StopProvider_Response) ProtoMessage() {} func (x *StopProvider_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[21] + mi := &file_tfplugin6_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1302,7 +1493,7 @@ type Schema_Block struct { func (x *Schema_Block) Reset() { *x = Schema_Block{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[23] + mi := &file_tfplugin6_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1315,7 +1506,7 @@ func (x *Schema_Block) String() string { func (*Schema_Block) ProtoMessage() {} func (x *Schema_Block) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[23] + mi := &file_tfplugin6_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1393,7 +1584,7 @@ type 
Schema_Attribute struct { func (x *Schema_Attribute) Reset() { *x = Schema_Attribute{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[24] + mi := &file_tfplugin6_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1406,7 +1597,7 @@ func (x *Schema_Attribute) String() string { func (*Schema_Attribute) ProtoMessage() {} func (x *Schema_Attribute) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[24] + mi := &file_tfplugin6_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1507,7 +1698,7 @@ type Schema_NestedBlock struct { func (x *Schema_NestedBlock) Reset() { *x = Schema_NestedBlock{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[25] + mi := &file_tfplugin6_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1520,7 +1711,7 @@ func (x *Schema_NestedBlock) String() string { func (*Schema_NestedBlock) ProtoMessage() {} func (x *Schema_NestedBlock) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[25] + mi := &file_tfplugin6_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1590,7 +1781,7 @@ type Schema_Object struct { func (x *Schema_Object) Reset() { *x = Schema_Object{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[26] + mi := &file_tfplugin6_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1603,7 +1794,7 @@ func (x *Schema_Object) String() string { func (*Schema_Object) ProtoMessage() {} func (x *Schema_Object) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[26] + mi := &file_tfplugin6_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1649,29 +1840,47 @@ func (x *Schema_Object) GetMaxItems() int64 { return 0 } -type GetMetadata_Request struct { +type Function_Parameter struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields -} -func (x *GetMetadata_Request) Reset() { - *x = GetMetadata_Request{} + // name is the human-readable display name for the parameter. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // type is the type constraint for the parameter. + Type []byte `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"` + // allow_null_value when enabled denotes that a null argument value can + // be passed to the provider. When disabled, Terraform returns an error + // if the argument value is null. + AllowNullValue bool `protobuf:"varint,3,opt,name=allow_null_value,json=allowNullValue,proto3" json:"allow_null_value,omitempty"` + // allow_unknown_values when enabled denotes that only wholly known + // argument values will be passed to the provider. When disabled, + // Terraform skips the function call entirely and assumes an unknown + // value result from the function. + AllowUnknownValues bool `protobuf:"varint,4,opt,name=allow_unknown_values,json=allowUnknownValues,proto3" json:"allow_unknown_values,omitempty"` + // description is human-readable documentation for the parameter. + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + // description_kind is the formatting of the description. 
+ DescriptionKind StringKind `protobuf:"varint,6,opt,name=description_kind,json=descriptionKind,proto3,enum=tfplugin6.StringKind" json:"description_kind,omitempty"` +} + +func (x *Function_Parameter) Reset() { + *x = Function_Parameter{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[27] + mi := &file_tfplugin6_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetMetadata_Request) String() string { +func (x *Function_Parameter) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetMetadata_Request) ProtoMessage() {} +func (*Function_Parameter) ProtoMessage() {} -func (x *GetMetadata_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[27] +func (x *Function_Parameter) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1682,26 +1891,156 @@ func (x *GetMetadata_Request) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetMetadata_Request.ProtoReflect.Descriptor instead. -func (*GetMetadata_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{7, 0} +// Deprecated: Use Function_Parameter.ProtoReflect.Descriptor instead. 
+func (*Function_Parameter) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{6, 0} } -type GetMetadata_Response struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ServerCapabilities *ServerCapabilities `protobuf:"bytes,1,opt,name=server_capabilities,json=serverCapabilities,proto3" json:"server_capabilities,omitempty"` - Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` - DataSources []*GetMetadata_DataSourceMetadata `protobuf:"bytes,3,rep,name=data_sources,json=dataSources,proto3" json:"data_sources,omitempty"` - Resources []*GetMetadata_ResourceMetadata `protobuf:"bytes,4,rep,name=resources,proto3" json:"resources,omitempty"` +func (x *Function_Parameter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Function_Parameter) GetType() []byte { + if x != nil { + return x.Type + } + return nil +} + +func (x *Function_Parameter) GetAllowNullValue() bool { + if x != nil { + return x.AllowNullValue + } + return false +} + +func (x *Function_Parameter) GetAllowUnknownValues() bool { + if x != nil { + return x.AllowUnknownValues + } + return false +} + +func (x *Function_Parameter) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Function_Parameter) GetDescriptionKind() StringKind { + if x != nil { + return x.DescriptionKind + } + return StringKind_PLAIN +} + +type Function_Return struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // type is the type constraint for the function result. 
+ Type []byte `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` +} + +func (x *Function_Return) Reset() { + *x = Function_Return{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Function_Return) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Function_Return) ProtoMessage() {} + +func (x *Function_Return) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Function_Return.ProtoReflect.Descriptor instead. +func (*Function_Return) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{6, 1} +} + +func (x *Function_Return) GetType() []byte { + if x != nil { + return x.Type + } + return nil +} + +type GetMetadata_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetMetadata_Request) Reset() { + *x = GetMetadata_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_Request) ProtoMessage() {} + +func (x *GetMetadata_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_Request.ProtoReflect.Descriptor instead. 
+func (*GetMetadata_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{8, 0} +} + +type GetMetadata_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ServerCapabilities *ServerCapabilities `protobuf:"bytes,1,opt,name=server_capabilities,json=serverCapabilities,proto3" json:"server_capabilities,omitempty"` + Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` + DataSources []*GetMetadata_DataSourceMetadata `protobuf:"bytes,3,rep,name=data_sources,json=dataSources,proto3" json:"data_sources,omitempty"` + Resources []*GetMetadata_ResourceMetadata `protobuf:"bytes,4,rep,name=resources,proto3" json:"resources,omitempty"` + // functions returns metadata for any functions. + Functions []*GetMetadata_FunctionMetadata `protobuf:"bytes,5,rep,name=functions,proto3" json:"functions,omitempty"` } func (x *GetMetadata_Response) Reset() { *x = GetMetadata_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[28] + mi := &file_tfplugin6_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1714,7 +2053,7 @@ func (x *GetMetadata_Response) String() string { func (*GetMetadata_Response) ProtoMessage() {} func (x *GetMetadata_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[28] + mi := &file_tfplugin6_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1727,7 +2066,7 @@ func (x *GetMetadata_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMetadata_Response.ProtoReflect.Descriptor instead. 
func (*GetMetadata_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{7, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{8, 1} } func (x *GetMetadata_Response) GetServerCapabilities() *ServerCapabilities { @@ -1758,6 +2097,61 @@ func (x *GetMetadata_Response) GetResources() []*GetMetadata_ResourceMetadata { return nil } +func (x *GetMetadata_Response) GetFunctions() []*GetMetadata_FunctionMetadata { + if x != nil { + return x.Functions + } + return nil +} + +type GetMetadata_FunctionMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // name is the function name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` +} + +func (x *GetMetadata_FunctionMetadata) Reset() { + *x = GetMetadata_FunctionMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetMetadata_FunctionMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetMetadata_FunctionMetadata) ProtoMessage() {} + +func (x *GetMetadata_FunctionMetadata) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetMetadata_FunctionMetadata.ProtoReflect.Descriptor instead. 
+func (*GetMetadata_FunctionMetadata) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{8, 2} +} + +func (x *GetMetadata_FunctionMetadata) GetName() string { + if x != nil { + return x.Name + } + return "" +} + type GetMetadata_DataSourceMetadata struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1769,7 +2163,7 @@ type GetMetadata_DataSourceMetadata struct { func (x *GetMetadata_DataSourceMetadata) Reset() { *x = GetMetadata_DataSourceMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[29] + mi := &file_tfplugin6_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1782,7 +2176,7 @@ func (x *GetMetadata_DataSourceMetadata) String() string { func (*GetMetadata_DataSourceMetadata) ProtoMessage() {} func (x *GetMetadata_DataSourceMetadata) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[29] + mi := &file_tfplugin6_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1795,7 +2189,7 @@ func (x *GetMetadata_DataSourceMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMetadata_DataSourceMetadata.ProtoReflect.Descriptor instead. 
func (*GetMetadata_DataSourceMetadata) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{7, 2} + return file_tfplugin6_proto_rawDescGZIP(), []int{8, 3} } func (x *GetMetadata_DataSourceMetadata) GetTypeName() string { @@ -1816,7 +2210,7 @@ type GetMetadata_ResourceMetadata struct { func (x *GetMetadata_ResourceMetadata) Reset() { *x = GetMetadata_ResourceMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[30] + mi := &file_tfplugin6_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1829,7 +2223,7 @@ func (x *GetMetadata_ResourceMetadata) String() string { func (*GetMetadata_ResourceMetadata) ProtoMessage() {} func (x *GetMetadata_ResourceMetadata) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[30] + mi := &file_tfplugin6_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1842,7 +2236,7 @@ func (x *GetMetadata_ResourceMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use GetMetadata_ResourceMetadata.ProtoReflect.Descriptor instead. 
func (*GetMetadata_ResourceMetadata) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{7, 3} + return file_tfplugin6_proto_rawDescGZIP(), []int{8, 4} } func (x *GetMetadata_ResourceMetadata) GetTypeName() string { @@ -1861,7 +2255,7 @@ type GetProviderSchema_Request struct { func (x *GetProviderSchema_Request) Reset() { *x = GetProviderSchema_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[31] + mi := &file_tfplugin6_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1874,7 +2268,7 @@ func (x *GetProviderSchema_Request) String() string { func (*GetProviderSchema_Request) ProtoMessage() {} func (x *GetProviderSchema_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[31] + mi := &file_tfplugin6_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1887,7 +2281,7 @@ func (x *GetProviderSchema_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProviderSchema_Request.ProtoReflect.Descriptor instead. func (*GetProviderSchema_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{8, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{9, 0} } type GetProviderSchema_Response struct { @@ -1901,12 +2295,14 @@ type GetProviderSchema_Response struct { Diagnostics []*Diagnostic `protobuf:"bytes,4,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` ProviderMeta *Schema `protobuf:"bytes,5,opt,name=provider_meta,json=providerMeta,proto3" json:"provider_meta,omitempty"` ServerCapabilities *ServerCapabilities `protobuf:"bytes,6,opt,name=server_capabilities,json=serverCapabilities,proto3" json:"server_capabilities,omitempty"` + // functions is a mapping of function names to definitions. 
+ Functions map[string]*Function `protobuf:"bytes,7,rep,name=functions,proto3" json:"functions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *GetProviderSchema_Response) Reset() { *x = GetProviderSchema_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[32] + mi := &file_tfplugin6_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1919,7 +2315,7 @@ func (x *GetProviderSchema_Response) String() string { func (*GetProviderSchema_Response) ProtoMessage() {} func (x *GetProviderSchema_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[32] + mi := &file_tfplugin6_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1932,7 +2328,7 @@ func (x *GetProviderSchema_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use GetProviderSchema_Response.ProtoReflect.Descriptor instead. 
func (*GetProviderSchema_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{8, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{9, 1} } func (x *GetProviderSchema_Response) GetProvider() *Schema { @@ -1977,6 +2373,13 @@ func (x *GetProviderSchema_Response) GetServerCapabilities() *ServerCapabilities return nil } +func (x *GetProviderSchema_Response) GetFunctions() map[string]*Function { + if x != nil { + return x.Functions + } + return nil +} + type ValidateProviderConfig_Request struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1988,7 +2391,7 @@ type ValidateProviderConfig_Request struct { func (x *ValidateProviderConfig_Request) Reset() { *x = ValidateProviderConfig_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[35] + mi := &file_tfplugin6_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2001,7 +2404,7 @@ func (x *ValidateProviderConfig_Request) String() string { func (*ValidateProviderConfig_Request) ProtoMessage() {} func (x *ValidateProviderConfig_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[35] + mi := &file_tfplugin6_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2014,7 +2417,7 @@ func (x *ValidateProviderConfig_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateProviderConfig_Request.ProtoReflect.Descriptor instead. 
func (*ValidateProviderConfig_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{9, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{10, 0} } func (x *ValidateProviderConfig_Request) GetConfig() *DynamicValue { @@ -2035,7 +2438,7 @@ type ValidateProviderConfig_Response struct { func (x *ValidateProviderConfig_Response) Reset() { *x = ValidateProviderConfig_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[36] + mi := &file_tfplugin6_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2048,7 +2451,7 @@ func (x *ValidateProviderConfig_Response) String() string { func (*ValidateProviderConfig_Response) ProtoMessage() {} func (x *ValidateProviderConfig_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[36] + mi := &file_tfplugin6_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2061,7 +2464,7 @@ func (x *ValidateProviderConfig_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateProviderConfig_Response.ProtoReflect.Descriptor instead. 
func (*ValidateProviderConfig_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{9, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{10, 1} } func (x *ValidateProviderConfig_Response) GetDiagnostics() []*Diagnostic { @@ -2099,7 +2502,7 @@ type UpgradeResourceState_Request struct { func (x *UpgradeResourceState_Request) Reset() { *x = UpgradeResourceState_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[37] + mi := &file_tfplugin6_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2112,7 +2515,7 @@ func (x *UpgradeResourceState_Request) String() string { func (*UpgradeResourceState_Request) ProtoMessage() {} func (x *UpgradeResourceState_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[37] + mi := &file_tfplugin6_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2125,7 +2528,7 @@ func (x *UpgradeResourceState_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use UpgradeResourceState_Request.ProtoReflect.Descriptor instead. 
func (*UpgradeResourceState_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{10, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{11, 0} } func (x *UpgradeResourceState_Request) GetTypeName() string { @@ -2167,7 +2570,7 @@ type UpgradeResourceState_Response struct { func (x *UpgradeResourceState_Response) Reset() { *x = UpgradeResourceState_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[38] + mi := &file_tfplugin6_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2180,7 +2583,7 @@ func (x *UpgradeResourceState_Response) String() string { func (*UpgradeResourceState_Response) ProtoMessage() {} func (x *UpgradeResourceState_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[38] + mi := &file_tfplugin6_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2193,7 +2596,7 @@ func (x *UpgradeResourceState_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use UpgradeResourceState_Response.ProtoReflect.Descriptor instead. 
func (*UpgradeResourceState_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{10, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{11, 1} } func (x *UpgradeResourceState_Response) GetUpgradedState() *DynamicValue { @@ -2222,7 +2625,7 @@ type ValidateResourceConfig_Request struct { func (x *ValidateResourceConfig_Request) Reset() { *x = ValidateResourceConfig_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[39] + mi := &file_tfplugin6_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2235,7 +2638,7 @@ func (x *ValidateResourceConfig_Request) String() string { func (*ValidateResourceConfig_Request) ProtoMessage() {} func (x *ValidateResourceConfig_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[39] + mi := &file_tfplugin6_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2248,7 +2651,7 @@ func (x *ValidateResourceConfig_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateResourceConfig_Request.ProtoReflect.Descriptor instead. 
func (*ValidateResourceConfig_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{11, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{12, 0} } func (x *ValidateResourceConfig_Request) GetTypeName() string { @@ -2276,7 +2679,7 @@ type ValidateResourceConfig_Response struct { func (x *ValidateResourceConfig_Response) Reset() { *x = ValidateResourceConfig_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[40] + mi := &file_tfplugin6_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2289,7 +2692,7 @@ func (x *ValidateResourceConfig_Response) String() string { func (*ValidateResourceConfig_Response) ProtoMessage() {} func (x *ValidateResourceConfig_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[40] + mi := &file_tfplugin6_proto_msgTypes[47] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2302,7 +2705,7 @@ func (x *ValidateResourceConfig_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ValidateResourceConfig_Response.ProtoReflect.Descriptor instead. 
func (*ValidateResourceConfig_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{11, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{12, 1} } func (x *ValidateResourceConfig_Response) GetDiagnostics() []*Diagnostic { @@ -2324,7 +2727,7 @@ type ValidateDataResourceConfig_Request struct { func (x *ValidateDataResourceConfig_Request) Reset() { *x = ValidateDataResourceConfig_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[41] + mi := &file_tfplugin6_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2337,7 +2740,7 @@ func (x *ValidateDataResourceConfig_Request) String() string { func (*ValidateDataResourceConfig_Request) ProtoMessage() {} func (x *ValidateDataResourceConfig_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[41] + mi := &file_tfplugin6_proto_msgTypes[48] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2350,7 +2753,7 @@ func (x *ValidateDataResourceConfig_Request) ProtoReflect() protoreflect.Message // Deprecated: Use ValidateDataResourceConfig_Request.ProtoReflect.Descriptor instead. 
func (*ValidateDataResourceConfig_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{12, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{13, 0} } func (x *ValidateDataResourceConfig_Request) GetTypeName() string { @@ -2378,7 +2781,7 @@ type ValidateDataResourceConfig_Response struct { func (x *ValidateDataResourceConfig_Response) Reset() { *x = ValidateDataResourceConfig_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[42] + mi := &file_tfplugin6_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2391,7 +2794,7 @@ func (x *ValidateDataResourceConfig_Response) String() string { func (*ValidateDataResourceConfig_Response) ProtoMessage() {} func (x *ValidateDataResourceConfig_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[42] + mi := &file_tfplugin6_proto_msgTypes[49] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2404,7 +2807,7 @@ func (x *ValidateDataResourceConfig_Response) ProtoReflect() protoreflect.Messag // Deprecated: Use ValidateDataResourceConfig_Response.ProtoReflect.Descriptor instead. 
func (*ValidateDataResourceConfig_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{12, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{13, 1} } func (x *ValidateDataResourceConfig_Response) GetDiagnostics() []*Diagnostic { @@ -2426,7 +2829,7 @@ type ConfigureProvider_Request struct { func (x *ConfigureProvider_Request) Reset() { *x = ConfigureProvider_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[43] + mi := &file_tfplugin6_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2439,7 +2842,7 @@ func (x *ConfigureProvider_Request) String() string { func (*ConfigureProvider_Request) ProtoMessage() {} func (x *ConfigureProvider_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[43] + mi := &file_tfplugin6_proto_msgTypes[50] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2452,7 +2855,7 @@ func (x *ConfigureProvider_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ConfigureProvider_Request.ProtoReflect.Descriptor instead. 
func (*ConfigureProvider_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{13, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{14, 0} } func (x *ConfigureProvider_Request) GetTerraformVersion() string { @@ -2480,7 +2883,7 @@ type ConfigureProvider_Response struct { func (x *ConfigureProvider_Response) Reset() { *x = ConfigureProvider_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[44] + mi := &file_tfplugin6_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2493,7 +2896,7 @@ func (x *ConfigureProvider_Response) String() string { func (*ConfigureProvider_Response) ProtoMessage() {} func (x *ConfigureProvider_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[44] + mi := &file_tfplugin6_proto_msgTypes[51] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2506,7 +2909,7 @@ func (x *ConfigureProvider_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ConfigureProvider_Response.ProtoReflect.Descriptor instead. 
func (*ConfigureProvider_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{13, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{14, 1} } func (x *ConfigureProvider_Response) GetDiagnostics() []*Diagnostic { @@ -2538,7 +2941,7 @@ type ReadResource_Request struct { func (x *ReadResource_Request) Reset() { *x = ReadResource_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[45] + mi := &file_tfplugin6_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2551,7 +2954,7 @@ func (x *ReadResource_Request) String() string { func (*ReadResource_Request) ProtoMessage() {} func (x *ReadResource_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[45] + mi := &file_tfplugin6_proto_msgTypes[52] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2564,7 +2967,7 @@ func (x *ReadResource_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadResource_Request.ProtoReflect.Descriptor instead. 
func (*ReadResource_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{14, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{15, 0} } func (x *ReadResource_Request) GetTypeName() string { @@ -2608,7 +3011,7 @@ type ReadResource_Response struct { func (x *ReadResource_Response) Reset() { *x = ReadResource_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[46] + mi := &file_tfplugin6_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2621,7 +3024,7 @@ func (x *ReadResource_Response) String() string { func (*ReadResource_Response) ProtoMessage() {} func (x *ReadResource_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[46] + mi := &file_tfplugin6_proto_msgTypes[53] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2634,7 +3037,7 @@ func (x *ReadResource_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadResource_Response.ProtoReflect.Descriptor instead. 
func (*ReadResource_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{14, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{15, 1} } func (x *ReadResource_Response) GetNewState() *DynamicValue { @@ -2674,7 +3077,7 @@ type PlanResourceChange_Request struct { func (x *PlanResourceChange_Request) Reset() { *x = PlanResourceChange_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[47] + mi := &file_tfplugin6_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2687,7 +3090,7 @@ func (x *PlanResourceChange_Request) String() string { func (*PlanResourceChange_Request) ProtoMessage() {} func (x *PlanResourceChange_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[47] + mi := &file_tfplugin6_proto_msgTypes[54] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2700,7 +3103,7 @@ func (x *PlanResourceChange_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use PlanResourceChange_Request.ProtoReflect.Descriptor instead. 
func (*PlanResourceChange_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{15, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{16, 0} } func (x *PlanResourceChange_Request) GetTypeName() string { @@ -2771,7 +3174,7 @@ type PlanResourceChange_Response struct { func (x *PlanResourceChange_Response) Reset() { *x = PlanResourceChange_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[48] + mi := &file_tfplugin6_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2784,7 +3187,7 @@ func (x *PlanResourceChange_Response) String() string { func (*PlanResourceChange_Response) ProtoMessage() {} func (x *PlanResourceChange_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[48] + mi := &file_tfplugin6_proto_msgTypes[55] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2797,7 +3200,7 @@ func (x *PlanResourceChange_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use PlanResourceChange_Response.ProtoReflect.Descriptor instead. 
func (*PlanResourceChange_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{15, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{16, 1} } func (x *PlanResourceChange_Response) GetPlannedState() *DynamicValue { @@ -2851,7 +3254,7 @@ type ApplyResourceChange_Request struct { func (x *ApplyResourceChange_Request) Reset() { *x = ApplyResourceChange_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[49] + mi := &file_tfplugin6_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2864,7 +3267,7 @@ func (x *ApplyResourceChange_Request) String() string { func (*ApplyResourceChange_Request) ProtoMessage() {} func (x *ApplyResourceChange_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[49] + mi := &file_tfplugin6_proto_msgTypes[56] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2877,7 +3280,7 @@ func (x *ApplyResourceChange_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ApplyResourceChange_Request.ProtoReflect.Descriptor instead. 
func (*ApplyResourceChange_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{16, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{17, 0} } func (x *ApplyResourceChange_Request) GetTypeName() string { @@ -2947,7 +3350,7 @@ type ApplyResourceChange_Response struct { func (x *ApplyResourceChange_Response) Reset() { *x = ApplyResourceChange_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[50] + mi := &file_tfplugin6_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2960,7 +3363,7 @@ func (x *ApplyResourceChange_Response) String() string { func (*ApplyResourceChange_Response) ProtoMessage() {} func (x *ApplyResourceChange_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[50] + mi := &file_tfplugin6_proto_msgTypes[57] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2973,7 +3376,7 @@ func (x *ApplyResourceChange_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ApplyResourceChange_Response.ProtoReflect.Descriptor instead. 
func (*ApplyResourceChange_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{16, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{17, 1} } func (x *ApplyResourceChange_Response) GetNewState() *DynamicValue { @@ -3016,7 +3419,7 @@ type ImportResourceState_Request struct { func (x *ImportResourceState_Request) Reset() { *x = ImportResourceState_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[51] + mi := &file_tfplugin6_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3029,7 +3432,7 @@ func (x *ImportResourceState_Request) String() string { func (*ImportResourceState_Request) ProtoMessage() {} func (x *ImportResourceState_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[51] + mi := &file_tfplugin6_proto_msgTypes[58] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3042,7 +3445,7 @@ func (x *ImportResourceState_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ImportResourceState_Request.ProtoReflect.Descriptor instead. 
func (*ImportResourceState_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{17, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{18, 0} } func (x *ImportResourceState_Request) GetTypeName() string { @@ -3072,7 +3475,7 @@ type ImportResourceState_ImportedResource struct { func (x *ImportResourceState_ImportedResource) Reset() { *x = ImportResourceState_ImportedResource{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[52] + mi := &file_tfplugin6_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3085,7 +3488,7 @@ func (x *ImportResourceState_ImportedResource) String() string { func (*ImportResourceState_ImportedResource) ProtoMessage() {} func (x *ImportResourceState_ImportedResource) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[52] + mi := &file_tfplugin6_proto_msgTypes[59] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3098,7 +3501,7 @@ func (x *ImportResourceState_ImportedResource) ProtoReflect() protoreflect.Messa // Deprecated: Use ImportResourceState_ImportedResource.ProtoReflect.Descriptor instead. 
func (*ImportResourceState_ImportedResource) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{17, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{18, 1} } func (x *ImportResourceState_ImportedResource) GetTypeName() string { @@ -3134,7 +3537,7 @@ type ImportResourceState_Response struct { func (x *ImportResourceState_Response) Reset() { *x = ImportResourceState_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[53] + mi := &file_tfplugin6_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3147,7 +3550,7 @@ func (x *ImportResourceState_Response) String() string { func (*ImportResourceState_Response) ProtoMessage() {} func (x *ImportResourceState_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[53] + mi := &file_tfplugin6_proto_msgTypes[60] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3160,7 +3563,7 @@ func (x *ImportResourceState_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ImportResourceState_Response.ProtoReflect.Descriptor instead. 
func (*ImportResourceState_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{17, 2} + return file_tfplugin6_proto_rawDescGZIP(), []int{18, 2} } func (x *ImportResourceState_Response) GetImportedResources() []*ImportResourceState_ImportedResource { @@ -3190,7 +3593,7 @@ type ReadDataSource_Request struct { func (x *ReadDataSource_Request) Reset() { *x = ReadDataSource_Request{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[54] + mi := &file_tfplugin6_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3203,7 +3606,7 @@ func (x *ReadDataSource_Request) String() string { func (*ReadDataSource_Request) ProtoMessage() {} func (x *ReadDataSource_Request) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[54] + mi := &file_tfplugin6_proto_msgTypes[61] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3216,7 +3619,7 @@ func (x *ReadDataSource_Request) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadDataSource_Request.ProtoReflect.Descriptor instead. 
func (*ReadDataSource_Request) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{18, 0} + return file_tfplugin6_proto_rawDescGZIP(), []int{19, 0} } func (x *ReadDataSource_Request) GetTypeName() string { @@ -3252,7 +3655,7 @@ type ReadDataSource_Response struct { func (x *ReadDataSource_Response) Reset() { *x = ReadDataSource_Response{} if protoimpl.UnsafeEnabled { - mi := &file_tfplugin6_proto_msgTypes[55] + mi := &file_tfplugin6_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3265,7 +3668,7 @@ func (x *ReadDataSource_Response) String() string { func (*ReadDataSource_Response) ProtoMessage() {} func (x *ReadDataSource_Response) ProtoReflect() protoreflect.Message { - mi := &file_tfplugin6_proto_msgTypes[55] + mi := &file_tfplugin6_proto_msgTypes[62] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3278,7 +3681,7 @@ func (x *ReadDataSource_Response) ProtoReflect() protoreflect.Message { // Deprecated: Use ReadDataSource_Response.ProtoReflect.Descriptor instead. 
func (*ReadDataSource_Response) Descriptor() ([]byte, []int) { - return file_tfplugin6_proto_rawDescGZIP(), []int{18, 1} + return file_tfplugin6_proto_rawDescGZIP(), []int{19, 1} } func (x *ReadDataSource_Response) GetState() *DynamicValue { @@ -3295,6 +3698,215 @@ func (x *ReadDataSource_Response) GetDiagnostics() []*Diagnostic { return nil } +type GetFunctions_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *GetFunctions_Request) Reset() { + *x = GetFunctions_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[63] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFunctions_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFunctions_Request) ProtoMessage() {} + +func (x *GetFunctions_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[63] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFunctions_Request.ProtoReflect.Descriptor instead. +func (*GetFunctions_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{20, 0} +} + +type GetFunctions_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // functions is a mapping of function names to definitions. + Functions map[string]*Function `protobuf:"bytes,1,rep,name=functions,proto3" json:"functions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // diagnostics is any warnings or errors. 
+ Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *GetFunctions_Response) Reset() { + *x = GetFunctions_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[64] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetFunctions_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetFunctions_Response) ProtoMessage() {} + +func (x *GetFunctions_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[64] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetFunctions_Response.ProtoReflect.Descriptor instead. +func (*GetFunctions_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{20, 1} +} + +func (x *GetFunctions_Response) GetFunctions() map[string]*Function { + if x != nil { + return x.Functions + } + return nil +} + +func (x *GetFunctions_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + +type CallFunction_Request struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // name is the name of the function being called. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // arguments is the data of each function argument value. 
+ Arguments []*DynamicValue `protobuf:"bytes,2,rep,name=arguments,proto3" json:"arguments,omitempty"` +} + +func (x *CallFunction_Request) Reset() { + *x = CallFunction_Request{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[66] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CallFunction_Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CallFunction_Request) ProtoMessage() {} + +func (x *CallFunction_Request) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[66] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CallFunction_Request.ProtoReflect.Descriptor instead. +func (*CallFunction_Request) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{21, 0} +} + +func (x *CallFunction_Request) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CallFunction_Request) GetArguments() []*DynamicValue { + if x != nil { + return x.Arguments + } + return nil +} + +type CallFunction_Response struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // result is result value after running the function logic. + Result *DynamicValue `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty"` + // diagnostics is any warnings or errors from the function logic. 
+ Diagnostics []*Diagnostic `protobuf:"bytes,2,rep,name=diagnostics,proto3" json:"diagnostics,omitempty"` +} + +func (x *CallFunction_Response) Reset() { + *x = CallFunction_Response{} + if protoimpl.UnsafeEnabled { + mi := &file_tfplugin6_proto_msgTypes[67] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CallFunction_Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CallFunction_Response) ProtoMessage() {} + +func (x *CallFunction_Response) ProtoReflect() protoreflect.Message { + mi := &file_tfplugin6_proto_msgTypes[67] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CallFunction_Response.ProtoReflect.Descriptor instead. +func (*CallFunction_Response) Descriptor() ([]byte, []int) { + return file_tfplugin6_proto_rawDescGZIP(), []int{21, 1} +} + +func (x *CallFunction_Response) GetResult() *DynamicValue { + if x != nil { + return x.Result + } + return nil +} + +func (x *CallFunction_Response) GetDiagnostics() []*Diagnostic { + if x != nil { + return x.Diagnostics + } + return nil +} + var File_tfplugin6_proto protoreflect.FileDescriptor var file_tfplugin6_proto_rawDesc = []byte{ @@ -3303,7 +3915,7 @@ var file_tfplugin6_proto_rawDesc = []byte{ 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x73, 0x67, 0x70, 0x61, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x6d, 0x73, 0x67, 0x70, 0x61, 0x63, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x22, 0xe3, 0x01, 0x0a, 0x0a, 0x44, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x22, 0xab, 0x02, 0x0a, 0x0a, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x12, 0x3a, 0x0a, 0x08, 0x73, 
0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, @@ -3314,397 +3926,490 @@ var file_tfplugin6_proto_rawDesc = []byte{ 0x06, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x36, 0x0a, 0x09, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, - 0x50, 0x61, 0x74, 0x68, 0x52, 0x09, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x22, - 0x2f, 0x0a, 0x08, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x0b, 0x0a, 0x07, 0x49, - 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, - 0x52, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x57, 0x41, 0x52, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, - 0x22, 0xdc, 0x01, 0x0a, 0x0d, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, - 0x74, 0x68, 0x12, 0x33, 0x0a, 0x05, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x2e, 0x53, 0x74, 0x65, 0x70, - 0x52, 0x05, 0x73, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x95, 0x01, 0x0a, 0x04, 0x53, 0x74, 0x65, 0x70, - 0x12, 0x27, 0x0a, 0x0e, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0d, 0x61, 0x74, 0x74, 0x72, - 0x69, 0x62, 0x75, 0x74, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x65, 0x6c, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x10, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, - 0x4b, 0x65, 0x79, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 
0x28, 0x0a, 0x0f, 0x65, 0x6c, 0x65, - 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x69, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, - 0x49, 0x6e, 0x74, 0x42, 0x0a, 0x0a, 0x08, 0x73, 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, - 0x3b, 0x0a, 0x0c, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x1a, - 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x0a, 0x08, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x96, 0x01, 0x0a, - 0x08, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6a, 0x73, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, - 0x07, 0x66, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x2e, 0x46, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x07, 0x66, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, 0x1a, 0x3a, 0x0a, 0x0c, 0x46, 0x6c, 0x61, - 0x74, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x95, 0x0a, 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x05, 0x62, 0x6c, - 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 
0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x42, 0x6c, 0x6f, - 0x63, 0x6b, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x1a, 0xa2, 0x02, 0x0a, 0x05, 0x42, 0x6c, - 0x6f, 0x63, 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, - 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0a, - 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x0b, 0x62, 0x6c, - 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x2e, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x0a, - 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x10, - 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1e, - 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x1a, 0xe4, - 0x02, 0x0a, 0x09, 0x41, 0x74, 0x74, 
0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, - 0x74, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x0a, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1a, 0x0a, - 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6d, - 0x70, 0x75, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x6f, 0x6d, - 0x70, 0x75, 0x74, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, - 0x76, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, - 0x69, 0x76, 0x65, 0x12, 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 
0x72, 0x65, 0x63, 0x61, - 0x74, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, - 0x63, 0x61, 0x74, 0x65, 0x64, 0x1a, 0xa7, 0x02, 0x0a, 0x0b, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, - 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, - 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x50, 0x61, 0x74, 0x68, 0x52, 0x09, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, + 0x30, 0x0a, 0x11, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x72, 0x67, 0x75, + 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x10, 0x66, 0x75, + 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x88, 0x01, + 0x01, 0x22, 0x2f, 0x0a, 0x08, 0x53, 0x65, 0x76, 0x65, 0x72, 0x69, 0x74, 0x79, 0x12, 0x0b, 0x0a, + 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, + 0x52, 0x4f, 0x52, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x57, 0x41, 0x52, 0x4e, 0x49, 0x4e, 0x47, + 0x10, 0x02, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x22, 0xdc, 0x01, 0x0a, 0x0d, 0x41, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x33, 0x0a, 0x05, 0x73, 0x74, + 0x65, 0x70, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x50, + 0x61, 0x74, 0x68, 0x2e, 0x53, 0x74, 0x65, 0x70, 0x52, 0x05, 0x73, 0x74, 0x65, 0x70, 0x73, 0x1a, + 0x95, 0x01, 0x0a, 0x04, 0x53, 0x74, 0x65, 0x70, 0x12, 0x27, 0x0a, 0x0e, 0x61, 0x74, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 
0x52, 0x0d, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x4e, 0x61, 0x6d, + 0x65, 0x12, 0x2e, 0x0a, 0x12, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x10, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x53, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x12, 0x28, 0x0a, 0x0f, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6b, 0x65, 0x79, + 0x5f, 0x69, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0d, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4b, 0x65, 0x79, 0x49, 0x6e, 0x74, 0x42, 0x0a, 0x0a, 0x08, 0x73, + 0x65, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x22, 0x3b, 0x0a, 0x0c, 0x53, 0x74, 0x6f, 0x70, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x20, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, + 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x45, + 0x72, 0x72, 0x6f, 0x72, 0x22, 0x96, 0x01, 0x0a, 0x08, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x04, 0x6a, 0x73, 0x6f, 0x6e, 0x12, 0x3a, 0x0a, 0x07, 0x66, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x46, 0x6c, 0x61, 0x74, + 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x66, 0x6c, 0x61, 0x74, 0x6d, 0x61, + 0x70, 0x1a, 0x3a, 0x0a, 0x0c, 0x46, 0x6c, 0x61, 0x74, 0x6d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 
0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x95, 0x0a, + 0x0a, 0x06, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x2d, 0x0a, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, - 0x6b, 0x12, 0x43, 0x0a, 0x07, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x2e, 0x4e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x07, 0x6e, - 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, - 0x65, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, - 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, - 0x22, 0x4d, 0x0a, 0x0b, 0x4e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x12, - 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, - 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x53, 0x54, - 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x4d, - 0x41, 0x50, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x05, 0x1a, - 0x8b, 0x02, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x61, 0x74, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 
0x1b, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, - 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x07, 0x6e, 0x65, 0x73, 0x74, 0x69, - 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x2e, 0x4e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x07, - 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1f, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, - 0x74, 0x65, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, - 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1f, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, - 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, - 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x42, 0x0a, 0x0b, 0x4e, 0x65, 0x73, - 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, - 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x10, - 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x53, - 0x45, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x4d, 0x41, 0x50, 0x10, 0x04, 0x22, 0x78, 0x0a, - 0x12, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, - 0x69, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x64, 0x65, 0x73, 0x74, - 0x72, 0x6f, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x70, 0x6c, 0x61, 0x6e, 0x44, - 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x12, 0x3f, 0x0a, 0x1c, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x72, - 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 
0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x67, 0x65, - 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x22, 0xa7, 0x03, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0xa8, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x4e, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, - 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, - 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x73, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, - 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, - 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, - 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x4c, 0x0a, 0x0c, 0x64, 0x61, 0x74, 0x61, - 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x6b, 0x1a, 0xa2, 0x02, 0x0a, 0x05, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 
0x12, 0x18, 0x0a, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x41, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x73, 0x12, 0x3e, 0x0a, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4e, 0x65, 0x73, 0x74, 0x65, + 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x52, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x79, 0x70, + 0x65, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, + 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x1a, 0xe4, 0x02, 0x0a, 0x09, 0x41, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 
0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x0b, + 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x0a, 0x6e, 0x65, 0x73, + 0x74, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, + 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, + 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64, 0x12, 0x1c, 0x0a, + 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x09, 0x73, 0x65, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x12, 0x40, 0x0a, 0x10, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, + 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1e, 0x0a, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x1a, 0xa7, 0x02, + 0x0a, 0x0b, 0x4e, 0x65, 0x73, 0x74, 0x65, 
0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x1b, 0x0a, + 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x52, 0x05, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x12, 0x43, 0x0a, 0x07, 0x6e, 0x65, 0x73, + 0x74, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x4e, 0x65, + 0x73, 0x74, 0x65, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x2e, 0x4e, 0x65, 0x73, 0x74, 0x69, 0x6e, + 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x07, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1b, + 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, + 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, + 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x22, 0x4d, 0x0a, 0x0b, 0x4e, 0x65, 0x73, 0x74, + 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, + 0x49, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x10, 0x01, + 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, + 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, 0x4d, 0x41, 0x50, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, + 0x47, 0x52, 0x4f, 0x55, 0x50, 0x10, 0x05, 0x1a, 0x8b, 0x02, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x74, 0x66, 0x70, 0x6c, 
0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, + 0x3e, 0x0a, 0x07, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x24, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4e, 0x65, 0x73, 0x74, 0x69, + 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x07, 0x6e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x12, + 0x1f, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, + 0x12, 0x1f, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x03, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, + 0x73, 0x22, 0x42, 0x0a, 0x0b, 0x4e, 0x65, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x4d, 0x6f, 0x64, 0x65, + 0x12, 0x0b, 0x0a, 0x07, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, + 0x06, 0x53, 0x49, 0x4e, 0x47, 0x4c, 0x45, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x53, + 0x54, 0x10, 0x02, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x03, 0x12, 0x07, 0x0a, 0x03, + 0x4d, 0x41, 0x50, 0x10, 0x04, 0x22, 0x8e, 0x05, 0x0a, 0x08, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x73, 0x12, 0x4c, 0x0a, 0x12, 0x76, 0x61, 0x72, 0x69, 0x61, 0x64, 0x69, 0x63, 0x5f, 0x70, 0x61, + 0x72, 0x61, 0x6d, 
0x65, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x11, 0x76, 0x61, + 0x72, 0x69, 0x61, 0x64, 0x69, 0x63, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, + 0x32, 0x0a, 0x06, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x74, + 0x75, 0x72, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, + 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, + 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, + 0x64, 0x12, 0x2f, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, + 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x1a, 0xf3, 0x01, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 
0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x61, 0x6c, 0x6c, 0x6f, + 0x77, 0x5f, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0e, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x75, 0x6e, 0x6b, 0x6e, + 0x6f, 0x77, 0x6e, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x12, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x10, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x0f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x1a, 0x1c, 0x0a, 0x06, 0x52, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x78, 0x0a, 0x12, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, + 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x64, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x0b, 0x70, 0x6c, 0x61, 0x6e, 0x44, 0x65, 0x73, 0x74, 0x72, 0x6f, 0x79, 0x12, + 0x3f, 0x0a, 0x1c, 0x67, 0x65, 0x74, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 
0x18, + 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x19, 0x67, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x22, 0x96, 0x04, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0xef, 0x02, 0x0a, 0x08, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x72, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, + 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x73, 0x12, 0x4c, 0x0a, 0x0c, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x0b, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, + 0x45, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, + 0x65, 0x74, 0x4d, 0x65, 0x74, 
0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x09, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x45, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x1a, 0x31, 0x0a, - 0x12, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, + 0x61, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x26, 0x0a, + 0x10, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x1a, 0x31, 0x0a, 0x12, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x2f, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, + 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xc7, 0x06, 0x0a, 0x11, 0x47, 0x65, + 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 
0x65, 0x6d, 0x61, 0x1a, + 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0xa6, 0x06, 0x0a, 0x08, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x08, 0x70, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x65, 0x0a, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x3a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x6c, 0x0a, + 0x13, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, + 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x11, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, + 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, + 0x67, 0x6e, 
0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x73, 0x12, 0x36, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0c, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x13, + 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, + 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, + 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, + 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x52, 0x0a, 0x09, + 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x34, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x1a, 0x55, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 
0x57, 0x0a, 0x16, 0x44, 0x61, 0x74, 0x61, 0x53, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x1a, 0x51, 0x0a, 0x0e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0x99, 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x3a, + 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 
0x73, 0x22, + 0x90, 0x02, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x72, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, - 0x1a, 0x2f, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, - 0x65, 0x22, 0xa0, 0x05, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, - 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0xff, 0x04, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x2d, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x65, - 0x0a, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x73, 0x12, 0x6c, 0x0a, 0x13, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 
0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, - 0x65, 0x74, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x11, 0x64, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, - 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x36, 0x0a, 0x0d, - 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, - 0x4d, 0x65, 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x13, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x5f, 0x63, - 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1d, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x65, - 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, - 0x52, 0x12, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, - 0x74, 0x69, 0x65, 0x73, 0x1a, 0x55, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 
0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x57, 0x0a, 0x16, 0x44, - 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x36, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x22, 0x99, 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, - 0x3a, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, - 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, - 0x22, 0x90, 0x02, 0x0a, 0x14, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x72, 0x0a, 0x07, 0x52, 0x65, 0x71, - 
0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x09, 0x72, - 0x61, 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x52, 0x08, 0x72, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x83, 0x01, - 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0e, 0x75, 0x70, - 0x67, 0x72, 0x61, 0x64, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x09, 0x72, 0x61, + 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x61, 0x77, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x52, 0x08, 0x72, 0x61, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x83, 0x01, 0x0a, + 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x0e, 0x75, 0x70, 0x67, + 0x72, 0x61, 0x64, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 0x75, 0x70, 0x67, 0x72, + 0x61, 0x64, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, + 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 
0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, + 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, + 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, + 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xba, 0x01, 0x0a, 0x1a, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, - 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0d, 
0x75, 0x70, 0x67, - 0x72, 0x61, 0x64, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, - 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, - 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, - 0x69, 0x63, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, - 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, - 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xc1, 0x01, 0x0a, 0x11, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x1a, 0x67, + 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x65, 0x72, + 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x56, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, - 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xba, 0x01, 0x0a, - 0x1a, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x52, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x57, 0x0a, 0x07, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xe3, 0x02, 0x0a, + 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0xbc, 0x01, + 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, + 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, + 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, + 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 
0x76, 0x61, 0x74, 0x65, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, + 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x93, 0x01, 0x0a, + 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, + 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, + 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, + 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, + 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, + 0x74, 0x65, 0x22, 0xf2, 0x04, 0x0a, 0x12, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xbb, 0x02, 0x0a, 0x07, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, + 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 
0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x45, 0x0a, 0x12, + 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x4e, 0x65, 0x77, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x70, 0x72, + 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x72, 0x69, + 0x6f, 0x72, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x9d, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 
0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x43, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x72, + 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x74, + 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, + 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, + 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, + 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, - 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xc1, 0x01, 0x0a, 0x11, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x1a, - 0x67, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2b, 0x0a, 0x11, 0x74, 0x65, - 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, - 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x43, 0x0a, 0x08, 0x52, 0x65, 0x73, 
0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, - 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0xe3, 0x02, - 0x0a, 0x0c, 0x52, 0x65, 0x61, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, 0xbc, - 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, - 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, - 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x63, 0x75, 0x72, 0x72, 0x65, - 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, - 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, - 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, - 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x93, 0x01, - 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, 0x65, - 0x77, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, + 0x61, 0x63, 0x79, 0x5f, 0x74, 
0x79, 0x70, 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, 0x79, 0x70, + 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0x92, 0x04, 0x0a, 0x13, 0x41, 0x70, 0x70, 0x6c, + 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, + 0xb6, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, + 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, - 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, - 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69, - 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76, - 0x61, 0x74, 0x65, 0x22, 0xf2, 0x04, 0x0a, 0x12, 0x50, 0x6c, 0x61, 0x6e, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x1a, 0xbb, 0x02, 0x0a, 0x07, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 
0x66, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, - 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x45, 0x0a, - 0x12, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x5f, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, + 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x10, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x4e, 0x65, 0x77, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, - 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x5f, 0x70, - 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x70, 0x72, - 0x69, 0x6f, 0x72, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, + 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, + 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, + 0x6e, 0x65, 0x64, 
0x50, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x9d, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, - 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, + 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0xc1, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, + 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x72, + 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, + 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x79, + 0x73, 0x74, 0x65, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, 0x61, + 0x63, 0x79, 0x54, 0x79, 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0xed, 0x02, 0x0a, + 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 
0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x1a, 0x36, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x1a, 0x78, 0x0a, 0x10, + 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, - 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x43, 0x0a, 0x10, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x5f, - 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x50, 0x61, 0x74, 0x68, 0x52, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, - 0x73, 0x52, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, - 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 0x69, 0x76, 0x61, 0x74, - 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, - 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, - 
0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x2c, 0x0a, 0x12, 0x6c, 0x65, - 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x54, 0x79, - 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0x92, 0x04, 0x0a, 0x13, 0x41, 0x70, 0x70, - 0x6c, 0x79, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, - 0x1a, 0xb6, 0x02, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, - 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x70, 0x72, 0x69, - 0x6f, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, - 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, - 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x27, 0x0a, 0x0f, 0x70, 0x6c, 0x61, 0x6e, 0x6e, 0x65, 0x64, 0x5f, 0x70, 0x72, - 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0e, 0x70, 0x6c, 0x61, - 0x6e, 0x6e, 0x65, 0x64, 0x50, 0x72, 
0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x3c, 0x0a, 0x0d, 0x70, - 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, + 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x1a, 0xa3, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x12, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x49, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e, + 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x52, 0x11, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x9c, 0x02, 0x0a, + 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x1a, + 0x95, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, + 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 
0x56, 0x61, 0x6c, 0x75, + 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, + 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x72, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, - 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, - 0x76, 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0xc1, 0x01, 0x0a, 0x08, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6e, 0x65, 0x77, 0x5f, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x08, 0x6e, 0x65, 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, - 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, - 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, - 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, - 0x2c, 0x0a, 0x12, 0x6c, 0x65, 0x67, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x73, - 0x79, 0x73, 0x74, 0x65, 0x6d, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x6c, 0x65, 0x67, - 0x61, 0x63, 
0x79, 0x54, 0x79, 0x70, 0x65, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x22, 0xed, 0x02, - 0x0a, 0x13, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x1a, 0x36, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, - 0x02, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x1a, 0x78, 0x0a, - 0x10, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2d, - 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, - 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, - 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x1a, 0xa3, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5e, 0x0a, 0x12, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, - 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x49, 0x6d, 0x70, - 0x6f, 0x72, 0x74, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x52, 0x11, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x52, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x0b, 
0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, - 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x9c, 0x02, - 0x0a, 0x0e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x1a, 0x95, 0x01, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, - 0x74, 0x79, 0x70, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x74, 0x79, 0x70, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, - 0x75, 0x65, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3c, 0x0a, 0x0d, 0x70, 0x72, - 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x79, - 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x70, 0x72, 0x6f, 0x76, - 0x69, 0x64, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x72, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, - 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 
0x69, 0x63, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, + 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x22, 0x81, 0x02, 0x0a, 0x0c, + 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x09, 0x0a, 0x07, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0xe5, 0x01, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, + 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x1a, 0x51, 0x0a, 0x0e, + 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x29, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x46, 0x75, 0x6e, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0xda, 0x01, 0x0a, 0x0c, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x1a, 0x54, 0x0a, 0x07, 
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x35, 0x0a, 0x09, 0x61, 0x72, 0x67, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x61, 0x72, 0x67, + 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x74, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, + 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x12, 0x37, 0x0a, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x52, 0x0b, 0x64, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x73, 0x2a, 0x25, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x09, 0x0a, 0x05, 0x50, 0x4c, 0x41, 0x49, 0x4e, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4d, 0x41, 0x52, 0x4b, 0x44, 0x4f, 0x57, - 0x4e, 0x10, 0x01, 0x32, 0x9c, 0x0a, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, + 0x4e, 0x10, 0x01, 0x32, 0xc2, 0x0b, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x4e, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1e, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, @@ -3781,17 +4486,27 @@ var file_tfplugin6_proto_rawDesc = []byte{ 0x72, 0x63, 
0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x44, 0x61, 0x74, 0x61, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x51, 0x0a, 0x0c, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, - 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x6f, 0x70, - 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x6f, - 0x70, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x42, 0x47, 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, - 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x74, 0x65, 0x72, 0x72, 0x61, - 0x66, 0x6f, 0x72, 0x6d, 0x2d, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x74, - 0x66, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x76, 0x36, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x2f, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x51, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, 0x46, + 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x47, 0x65, 0x74, + 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, 0x43, + 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x36, 0x2e, + 0x43, 0x61, 0x6c, 0x6c, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x1f, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x36, 0x2e, 0x53, 0x74, 0x6f, 0x70, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x47, 0x5a, 0x45, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, + 0x2f, 0x74, 0x65, 0x72, 0x72, 0x61, 0x66, 0x6f, 0x72, 0x6d, 0x2d, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x2d, 0x67, 0x6f, 0x2f, 0x74, 0x66, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x76, 0x36, 0x2f, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x74, 0x66, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x36, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -3807,7 +4522,7 @@ func file_tfplugin6_proto_rawDescGZIP() []byte { } var file_tfplugin6_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_tfplugin6_proto_msgTypes = make([]protoimpl.MessageInfo, 56) +var file_tfplugin6_proto_msgTypes = make([]protoimpl.MessageInfo, 68) var file_tfplugin6_proto_goTypes = []interface{}{ (StringKind)(0), // 0: tfplugin6.StringKind (Diagnostic_Severity)(0), // 1: tfplugin6.Diagnostic.Severity @@ -3819,150 +4534,180 @@ var file_tfplugin6_proto_goTypes = []interface{}{ (*StopProvider)(nil), // 7: tfplugin6.StopProvider (*RawState)(nil), // 8: tfplugin6.RawState (*Schema)(nil), // 9: tfplugin6.Schema - (*ServerCapabilities)(nil), // 10: 
tfplugin6.ServerCapabilities - (*GetMetadata)(nil), // 11: tfplugin6.GetMetadata - (*GetProviderSchema)(nil), // 12: tfplugin6.GetProviderSchema - (*ValidateProviderConfig)(nil), // 13: tfplugin6.ValidateProviderConfig - (*UpgradeResourceState)(nil), // 14: tfplugin6.UpgradeResourceState - (*ValidateResourceConfig)(nil), // 15: tfplugin6.ValidateResourceConfig - (*ValidateDataResourceConfig)(nil), // 16: tfplugin6.ValidateDataResourceConfig - (*ConfigureProvider)(nil), // 17: tfplugin6.ConfigureProvider - (*ReadResource)(nil), // 18: tfplugin6.ReadResource - (*PlanResourceChange)(nil), // 19: tfplugin6.PlanResourceChange - (*ApplyResourceChange)(nil), // 20: tfplugin6.ApplyResourceChange - (*ImportResourceState)(nil), // 21: tfplugin6.ImportResourceState - (*ReadDataSource)(nil), // 22: tfplugin6.ReadDataSource - (*AttributePath_Step)(nil), // 23: tfplugin6.AttributePath.Step - (*StopProvider_Request)(nil), // 24: tfplugin6.StopProvider.Request - (*StopProvider_Response)(nil), // 25: tfplugin6.StopProvider.Response - nil, // 26: tfplugin6.RawState.FlatmapEntry - (*Schema_Block)(nil), // 27: tfplugin6.Schema.Block - (*Schema_Attribute)(nil), // 28: tfplugin6.Schema.Attribute - (*Schema_NestedBlock)(nil), // 29: tfplugin6.Schema.NestedBlock - (*Schema_Object)(nil), // 30: tfplugin6.Schema.Object - (*GetMetadata_Request)(nil), // 31: tfplugin6.GetMetadata.Request - (*GetMetadata_Response)(nil), // 32: tfplugin6.GetMetadata.Response - (*GetMetadata_DataSourceMetadata)(nil), // 33: tfplugin6.GetMetadata.DataSourceMetadata - (*GetMetadata_ResourceMetadata)(nil), // 34: tfplugin6.GetMetadata.ResourceMetadata - (*GetProviderSchema_Request)(nil), // 35: tfplugin6.GetProviderSchema.Request - (*GetProviderSchema_Response)(nil), // 36: tfplugin6.GetProviderSchema.Response - nil, // 37: tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry - nil, // 38: tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry - (*ValidateProviderConfig_Request)(nil), // 39: 
tfplugin6.ValidateProviderConfig.Request - (*ValidateProviderConfig_Response)(nil), // 40: tfplugin6.ValidateProviderConfig.Response - (*UpgradeResourceState_Request)(nil), // 41: tfplugin6.UpgradeResourceState.Request - (*UpgradeResourceState_Response)(nil), // 42: tfplugin6.UpgradeResourceState.Response - (*ValidateResourceConfig_Request)(nil), // 43: tfplugin6.ValidateResourceConfig.Request - (*ValidateResourceConfig_Response)(nil), // 44: tfplugin6.ValidateResourceConfig.Response - (*ValidateDataResourceConfig_Request)(nil), // 45: tfplugin6.ValidateDataResourceConfig.Request - (*ValidateDataResourceConfig_Response)(nil), // 46: tfplugin6.ValidateDataResourceConfig.Response - (*ConfigureProvider_Request)(nil), // 47: tfplugin6.ConfigureProvider.Request - (*ConfigureProvider_Response)(nil), // 48: tfplugin6.ConfigureProvider.Response - (*ReadResource_Request)(nil), // 49: tfplugin6.ReadResource.Request - (*ReadResource_Response)(nil), // 50: tfplugin6.ReadResource.Response - (*PlanResourceChange_Request)(nil), // 51: tfplugin6.PlanResourceChange.Request - (*PlanResourceChange_Response)(nil), // 52: tfplugin6.PlanResourceChange.Response - (*ApplyResourceChange_Request)(nil), // 53: tfplugin6.ApplyResourceChange.Request - (*ApplyResourceChange_Response)(nil), // 54: tfplugin6.ApplyResourceChange.Response - (*ImportResourceState_Request)(nil), // 55: tfplugin6.ImportResourceState.Request - (*ImportResourceState_ImportedResource)(nil), // 56: tfplugin6.ImportResourceState.ImportedResource - (*ImportResourceState_Response)(nil), // 57: tfplugin6.ImportResourceState.Response - (*ReadDataSource_Request)(nil), // 58: tfplugin6.ReadDataSource.Request - (*ReadDataSource_Response)(nil), // 59: tfplugin6.ReadDataSource.Response + (*Function)(nil), // 10: tfplugin6.Function + (*ServerCapabilities)(nil), // 11: tfplugin6.ServerCapabilities + (*GetMetadata)(nil), // 12: tfplugin6.GetMetadata + (*GetProviderSchema)(nil), // 13: tfplugin6.GetProviderSchema + 
(*ValidateProviderConfig)(nil), // 14: tfplugin6.ValidateProviderConfig + (*UpgradeResourceState)(nil), // 15: tfplugin6.UpgradeResourceState + (*ValidateResourceConfig)(nil), // 16: tfplugin6.ValidateResourceConfig + (*ValidateDataResourceConfig)(nil), // 17: tfplugin6.ValidateDataResourceConfig + (*ConfigureProvider)(nil), // 18: tfplugin6.ConfigureProvider + (*ReadResource)(nil), // 19: tfplugin6.ReadResource + (*PlanResourceChange)(nil), // 20: tfplugin6.PlanResourceChange + (*ApplyResourceChange)(nil), // 21: tfplugin6.ApplyResourceChange + (*ImportResourceState)(nil), // 22: tfplugin6.ImportResourceState + (*ReadDataSource)(nil), // 23: tfplugin6.ReadDataSource + (*GetFunctions)(nil), // 24: tfplugin6.GetFunctions + (*CallFunction)(nil), // 25: tfplugin6.CallFunction + (*AttributePath_Step)(nil), // 26: tfplugin6.AttributePath.Step + (*StopProvider_Request)(nil), // 27: tfplugin6.StopProvider.Request + (*StopProvider_Response)(nil), // 28: tfplugin6.StopProvider.Response + nil, // 29: tfplugin6.RawState.FlatmapEntry + (*Schema_Block)(nil), // 30: tfplugin6.Schema.Block + (*Schema_Attribute)(nil), // 31: tfplugin6.Schema.Attribute + (*Schema_NestedBlock)(nil), // 32: tfplugin6.Schema.NestedBlock + (*Schema_Object)(nil), // 33: tfplugin6.Schema.Object + (*Function_Parameter)(nil), // 34: tfplugin6.Function.Parameter + (*Function_Return)(nil), // 35: tfplugin6.Function.Return + (*GetMetadata_Request)(nil), // 36: tfplugin6.GetMetadata.Request + (*GetMetadata_Response)(nil), // 37: tfplugin6.GetMetadata.Response + (*GetMetadata_FunctionMetadata)(nil), // 38: tfplugin6.GetMetadata.FunctionMetadata + (*GetMetadata_DataSourceMetadata)(nil), // 39: tfplugin6.GetMetadata.DataSourceMetadata + (*GetMetadata_ResourceMetadata)(nil), // 40: tfplugin6.GetMetadata.ResourceMetadata + (*GetProviderSchema_Request)(nil), // 41: tfplugin6.GetProviderSchema.Request + (*GetProviderSchema_Response)(nil), // 42: tfplugin6.GetProviderSchema.Response + nil, // 43: 
tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry + nil, // 44: tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry + nil, // 45: tfplugin6.GetProviderSchema.Response.FunctionsEntry + (*ValidateProviderConfig_Request)(nil), // 46: tfplugin6.ValidateProviderConfig.Request + (*ValidateProviderConfig_Response)(nil), // 47: tfplugin6.ValidateProviderConfig.Response + (*UpgradeResourceState_Request)(nil), // 48: tfplugin6.UpgradeResourceState.Request + (*UpgradeResourceState_Response)(nil), // 49: tfplugin6.UpgradeResourceState.Response + (*ValidateResourceConfig_Request)(nil), // 50: tfplugin6.ValidateResourceConfig.Request + (*ValidateResourceConfig_Response)(nil), // 51: tfplugin6.ValidateResourceConfig.Response + (*ValidateDataResourceConfig_Request)(nil), // 52: tfplugin6.ValidateDataResourceConfig.Request + (*ValidateDataResourceConfig_Response)(nil), // 53: tfplugin6.ValidateDataResourceConfig.Response + (*ConfigureProvider_Request)(nil), // 54: tfplugin6.ConfigureProvider.Request + (*ConfigureProvider_Response)(nil), // 55: tfplugin6.ConfigureProvider.Response + (*ReadResource_Request)(nil), // 56: tfplugin6.ReadResource.Request + (*ReadResource_Response)(nil), // 57: tfplugin6.ReadResource.Response + (*PlanResourceChange_Request)(nil), // 58: tfplugin6.PlanResourceChange.Request + (*PlanResourceChange_Response)(nil), // 59: tfplugin6.PlanResourceChange.Response + (*ApplyResourceChange_Request)(nil), // 60: tfplugin6.ApplyResourceChange.Request + (*ApplyResourceChange_Response)(nil), // 61: tfplugin6.ApplyResourceChange.Response + (*ImportResourceState_Request)(nil), // 62: tfplugin6.ImportResourceState.Request + (*ImportResourceState_ImportedResource)(nil), // 63: tfplugin6.ImportResourceState.ImportedResource + (*ImportResourceState_Response)(nil), // 64: tfplugin6.ImportResourceState.Response + (*ReadDataSource_Request)(nil), // 65: tfplugin6.ReadDataSource.Request + (*ReadDataSource_Response)(nil), // 66: tfplugin6.ReadDataSource.Response + 
(*GetFunctions_Request)(nil), // 67: tfplugin6.GetFunctions.Request + (*GetFunctions_Response)(nil), // 68: tfplugin6.GetFunctions.Response + nil, // 69: tfplugin6.GetFunctions.Response.FunctionsEntry + (*CallFunction_Request)(nil), // 70: tfplugin6.CallFunction.Request + (*CallFunction_Response)(nil), // 71: tfplugin6.CallFunction.Response } var file_tfplugin6_proto_depIdxs = []int32{ 1, // 0: tfplugin6.Diagnostic.severity:type_name -> tfplugin6.Diagnostic.Severity 6, // 1: tfplugin6.Diagnostic.attribute:type_name -> tfplugin6.AttributePath - 23, // 2: tfplugin6.AttributePath.steps:type_name -> tfplugin6.AttributePath.Step - 26, // 3: tfplugin6.RawState.flatmap:type_name -> tfplugin6.RawState.FlatmapEntry - 27, // 4: tfplugin6.Schema.block:type_name -> tfplugin6.Schema.Block - 28, // 5: tfplugin6.Schema.Block.attributes:type_name -> tfplugin6.Schema.Attribute - 29, // 6: tfplugin6.Schema.Block.block_types:type_name -> tfplugin6.Schema.NestedBlock - 0, // 7: tfplugin6.Schema.Block.description_kind:type_name -> tfplugin6.StringKind - 30, // 8: tfplugin6.Schema.Attribute.nested_type:type_name -> tfplugin6.Schema.Object - 0, // 9: tfplugin6.Schema.Attribute.description_kind:type_name -> tfplugin6.StringKind - 27, // 10: tfplugin6.Schema.NestedBlock.block:type_name -> tfplugin6.Schema.Block - 2, // 11: tfplugin6.Schema.NestedBlock.nesting:type_name -> tfplugin6.Schema.NestedBlock.NestingMode - 28, // 12: tfplugin6.Schema.Object.attributes:type_name -> tfplugin6.Schema.Attribute - 3, // 13: tfplugin6.Schema.Object.nesting:type_name -> tfplugin6.Schema.Object.NestingMode - 10, // 14: tfplugin6.GetMetadata.Response.server_capabilities:type_name -> tfplugin6.ServerCapabilities - 5, // 15: tfplugin6.GetMetadata.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 33, // 16: tfplugin6.GetMetadata.Response.data_sources:type_name -> tfplugin6.GetMetadata.DataSourceMetadata - 34, // 17: tfplugin6.GetMetadata.Response.resources:type_name -> 
tfplugin6.GetMetadata.ResourceMetadata - 9, // 18: tfplugin6.GetProviderSchema.Response.provider:type_name -> tfplugin6.Schema - 37, // 19: tfplugin6.GetProviderSchema.Response.resource_schemas:type_name -> tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry - 38, // 20: tfplugin6.GetProviderSchema.Response.data_source_schemas:type_name -> tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry - 5, // 21: tfplugin6.GetProviderSchema.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 9, // 22: tfplugin6.GetProviderSchema.Response.provider_meta:type_name -> tfplugin6.Schema - 10, // 23: tfplugin6.GetProviderSchema.Response.server_capabilities:type_name -> tfplugin6.ServerCapabilities - 9, // 24: tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry.value:type_name -> tfplugin6.Schema - 9, // 25: tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry.value:type_name -> tfplugin6.Schema - 4, // 26: tfplugin6.ValidateProviderConfig.Request.config:type_name -> tfplugin6.DynamicValue - 5, // 27: tfplugin6.ValidateProviderConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 8, // 28: tfplugin6.UpgradeResourceState.Request.raw_state:type_name -> tfplugin6.RawState - 4, // 29: tfplugin6.UpgradeResourceState.Response.upgraded_state:type_name -> tfplugin6.DynamicValue - 5, // 30: tfplugin6.UpgradeResourceState.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 4, // 31: tfplugin6.ValidateResourceConfig.Request.config:type_name -> tfplugin6.DynamicValue - 5, // 32: tfplugin6.ValidateResourceConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 4, // 33: tfplugin6.ValidateDataResourceConfig.Request.config:type_name -> tfplugin6.DynamicValue - 5, // 34: tfplugin6.ValidateDataResourceConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 4, // 35: tfplugin6.ConfigureProvider.Request.config:type_name -> tfplugin6.DynamicValue - 5, // 36: tfplugin6.ConfigureProvider.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 
4, // 37: tfplugin6.ReadResource.Request.current_state:type_name -> tfplugin6.DynamicValue - 4, // 38: tfplugin6.ReadResource.Request.provider_meta:type_name -> tfplugin6.DynamicValue - 4, // 39: tfplugin6.ReadResource.Response.new_state:type_name -> tfplugin6.DynamicValue - 5, // 40: tfplugin6.ReadResource.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 4, // 41: tfplugin6.PlanResourceChange.Request.prior_state:type_name -> tfplugin6.DynamicValue - 4, // 42: tfplugin6.PlanResourceChange.Request.proposed_new_state:type_name -> tfplugin6.DynamicValue - 4, // 43: tfplugin6.PlanResourceChange.Request.config:type_name -> tfplugin6.DynamicValue - 4, // 44: tfplugin6.PlanResourceChange.Request.provider_meta:type_name -> tfplugin6.DynamicValue - 4, // 45: tfplugin6.PlanResourceChange.Response.planned_state:type_name -> tfplugin6.DynamicValue - 6, // 46: tfplugin6.PlanResourceChange.Response.requires_replace:type_name -> tfplugin6.AttributePath - 5, // 47: tfplugin6.PlanResourceChange.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 4, // 48: tfplugin6.ApplyResourceChange.Request.prior_state:type_name -> tfplugin6.DynamicValue - 4, // 49: tfplugin6.ApplyResourceChange.Request.planned_state:type_name -> tfplugin6.DynamicValue - 4, // 50: tfplugin6.ApplyResourceChange.Request.config:type_name -> tfplugin6.DynamicValue - 4, // 51: tfplugin6.ApplyResourceChange.Request.provider_meta:type_name -> tfplugin6.DynamicValue - 4, // 52: tfplugin6.ApplyResourceChange.Response.new_state:type_name -> tfplugin6.DynamicValue - 5, // 53: tfplugin6.ApplyResourceChange.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 4, // 54: tfplugin6.ImportResourceState.ImportedResource.state:type_name -> tfplugin6.DynamicValue - 56, // 55: tfplugin6.ImportResourceState.Response.imported_resources:type_name -> tfplugin6.ImportResourceState.ImportedResource - 5, // 56: tfplugin6.ImportResourceState.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 4, // 57: 
tfplugin6.ReadDataSource.Request.config:type_name -> tfplugin6.DynamicValue - 4, // 58: tfplugin6.ReadDataSource.Request.provider_meta:type_name -> tfplugin6.DynamicValue - 4, // 59: tfplugin6.ReadDataSource.Response.state:type_name -> tfplugin6.DynamicValue - 5, // 60: tfplugin6.ReadDataSource.Response.diagnostics:type_name -> tfplugin6.Diagnostic - 31, // 61: tfplugin6.Provider.GetMetadata:input_type -> tfplugin6.GetMetadata.Request - 35, // 62: tfplugin6.Provider.GetProviderSchema:input_type -> tfplugin6.GetProviderSchema.Request - 39, // 63: tfplugin6.Provider.ValidateProviderConfig:input_type -> tfplugin6.ValidateProviderConfig.Request - 43, // 64: tfplugin6.Provider.ValidateResourceConfig:input_type -> tfplugin6.ValidateResourceConfig.Request - 45, // 65: tfplugin6.Provider.ValidateDataResourceConfig:input_type -> tfplugin6.ValidateDataResourceConfig.Request - 41, // 66: tfplugin6.Provider.UpgradeResourceState:input_type -> tfplugin6.UpgradeResourceState.Request - 47, // 67: tfplugin6.Provider.ConfigureProvider:input_type -> tfplugin6.ConfigureProvider.Request - 49, // 68: tfplugin6.Provider.ReadResource:input_type -> tfplugin6.ReadResource.Request - 51, // 69: tfplugin6.Provider.PlanResourceChange:input_type -> tfplugin6.PlanResourceChange.Request - 53, // 70: tfplugin6.Provider.ApplyResourceChange:input_type -> tfplugin6.ApplyResourceChange.Request - 55, // 71: tfplugin6.Provider.ImportResourceState:input_type -> tfplugin6.ImportResourceState.Request - 58, // 72: tfplugin6.Provider.ReadDataSource:input_type -> tfplugin6.ReadDataSource.Request - 24, // 73: tfplugin6.Provider.StopProvider:input_type -> tfplugin6.StopProvider.Request - 32, // 74: tfplugin6.Provider.GetMetadata:output_type -> tfplugin6.GetMetadata.Response - 36, // 75: tfplugin6.Provider.GetProviderSchema:output_type -> tfplugin6.GetProviderSchema.Response - 40, // 76: tfplugin6.Provider.ValidateProviderConfig:output_type -> tfplugin6.ValidateProviderConfig.Response - 44, // 77: 
tfplugin6.Provider.ValidateResourceConfig:output_type -> tfplugin6.ValidateResourceConfig.Response - 46, // 78: tfplugin6.Provider.ValidateDataResourceConfig:output_type -> tfplugin6.ValidateDataResourceConfig.Response - 42, // 79: tfplugin6.Provider.UpgradeResourceState:output_type -> tfplugin6.UpgradeResourceState.Response - 48, // 80: tfplugin6.Provider.ConfigureProvider:output_type -> tfplugin6.ConfigureProvider.Response - 50, // 81: tfplugin6.Provider.ReadResource:output_type -> tfplugin6.ReadResource.Response - 52, // 82: tfplugin6.Provider.PlanResourceChange:output_type -> tfplugin6.PlanResourceChange.Response - 54, // 83: tfplugin6.Provider.ApplyResourceChange:output_type -> tfplugin6.ApplyResourceChange.Response - 57, // 84: tfplugin6.Provider.ImportResourceState:output_type -> tfplugin6.ImportResourceState.Response - 59, // 85: tfplugin6.Provider.ReadDataSource:output_type -> tfplugin6.ReadDataSource.Response - 25, // 86: tfplugin6.Provider.StopProvider:output_type -> tfplugin6.StopProvider.Response - 74, // [74:87] is the sub-list for method output_type - 61, // [61:74] is the sub-list for method input_type - 61, // [61:61] is the sub-list for extension type_name - 61, // [61:61] is the sub-list for extension extendee - 0, // [0:61] is the sub-list for field type_name + 26, // 2: tfplugin6.AttributePath.steps:type_name -> tfplugin6.AttributePath.Step + 29, // 3: tfplugin6.RawState.flatmap:type_name -> tfplugin6.RawState.FlatmapEntry + 30, // 4: tfplugin6.Schema.block:type_name -> tfplugin6.Schema.Block + 34, // 5: tfplugin6.Function.parameters:type_name -> tfplugin6.Function.Parameter + 34, // 6: tfplugin6.Function.variadic_parameter:type_name -> tfplugin6.Function.Parameter + 35, // 7: tfplugin6.Function.return:type_name -> tfplugin6.Function.Return + 0, // 8: tfplugin6.Function.description_kind:type_name -> tfplugin6.StringKind + 31, // 9: tfplugin6.Schema.Block.attributes:type_name -> tfplugin6.Schema.Attribute + 32, // 10: 
tfplugin6.Schema.Block.block_types:type_name -> tfplugin6.Schema.NestedBlock + 0, // 11: tfplugin6.Schema.Block.description_kind:type_name -> tfplugin6.StringKind + 33, // 12: tfplugin6.Schema.Attribute.nested_type:type_name -> tfplugin6.Schema.Object + 0, // 13: tfplugin6.Schema.Attribute.description_kind:type_name -> tfplugin6.StringKind + 30, // 14: tfplugin6.Schema.NestedBlock.block:type_name -> tfplugin6.Schema.Block + 2, // 15: tfplugin6.Schema.NestedBlock.nesting:type_name -> tfplugin6.Schema.NestedBlock.NestingMode + 31, // 16: tfplugin6.Schema.Object.attributes:type_name -> tfplugin6.Schema.Attribute + 3, // 17: tfplugin6.Schema.Object.nesting:type_name -> tfplugin6.Schema.Object.NestingMode + 0, // 18: tfplugin6.Function.Parameter.description_kind:type_name -> tfplugin6.StringKind + 11, // 19: tfplugin6.GetMetadata.Response.server_capabilities:type_name -> tfplugin6.ServerCapabilities + 5, // 20: tfplugin6.GetMetadata.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 39, // 21: tfplugin6.GetMetadata.Response.data_sources:type_name -> tfplugin6.GetMetadata.DataSourceMetadata + 40, // 22: tfplugin6.GetMetadata.Response.resources:type_name -> tfplugin6.GetMetadata.ResourceMetadata + 38, // 23: tfplugin6.GetMetadata.Response.functions:type_name -> tfplugin6.GetMetadata.FunctionMetadata + 9, // 24: tfplugin6.GetProviderSchema.Response.provider:type_name -> tfplugin6.Schema + 43, // 25: tfplugin6.GetProviderSchema.Response.resource_schemas:type_name -> tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry + 44, // 26: tfplugin6.GetProviderSchema.Response.data_source_schemas:type_name -> tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry + 5, // 27: tfplugin6.GetProviderSchema.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 9, // 28: tfplugin6.GetProviderSchema.Response.provider_meta:type_name -> tfplugin6.Schema + 11, // 29: tfplugin6.GetProviderSchema.Response.server_capabilities:type_name -> tfplugin6.ServerCapabilities + 45, // 
30: tfplugin6.GetProviderSchema.Response.functions:type_name -> tfplugin6.GetProviderSchema.Response.FunctionsEntry + 9, // 31: tfplugin6.GetProviderSchema.Response.ResourceSchemasEntry.value:type_name -> tfplugin6.Schema + 9, // 32: tfplugin6.GetProviderSchema.Response.DataSourceSchemasEntry.value:type_name -> tfplugin6.Schema + 10, // 33: tfplugin6.GetProviderSchema.Response.FunctionsEntry.value:type_name -> tfplugin6.Function + 4, // 34: tfplugin6.ValidateProviderConfig.Request.config:type_name -> tfplugin6.DynamicValue + 5, // 35: tfplugin6.ValidateProviderConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 8, // 36: tfplugin6.UpgradeResourceState.Request.raw_state:type_name -> tfplugin6.RawState + 4, // 37: tfplugin6.UpgradeResourceState.Response.upgraded_state:type_name -> tfplugin6.DynamicValue + 5, // 38: tfplugin6.UpgradeResourceState.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 39: tfplugin6.ValidateResourceConfig.Request.config:type_name -> tfplugin6.DynamicValue + 5, // 40: tfplugin6.ValidateResourceConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 41: tfplugin6.ValidateDataResourceConfig.Request.config:type_name -> tfplugin6.DynamicValue + 5, // 42: tfplugin6.ValidateDataResourceConfig.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 43: tfplugin6.ConfigureProvider.Request.config:type_name -> tfplugin6.DynamicValue + 5, // 44: tfplugin6.ConfigureProvider.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 45: tfplugin6.ReadResource.Request.current_state:type_name -> tfplugin6.DynamicValue + 4, // 46: tfplugin6.ReadResource.Request.provider_meta:type_name -> tfplugin6.DynamicValue + 4, // 47: tfplugin6.ReadResource.Response.new_state:type_name -> tfplugin6.DynamicValue + 5, // 48: tfplugin6.ReadResource.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 49: tfplugin6.PlanResourceChange.Request.prior_state:type_name -> tfplugin6.DynamicValue + 4, // 50: 
tfplugin6.PlanResourceChange.Request.proposed_new_state:type_name -> tfplugin6.DynamicValue + 4, // 51: tfplugin6.PlanResourceChange.Request.config:type_name -> tfplugin6.DynamicValue + 4, // 52: tfplugin6.PlanResourceChange.Request.provider_meta:type_name -> tfplugin6.DynamicValue + 4, // 53: tfplugin6.PlanResourceChange.Response.planned_state:type_name -> tfplugin6.DynamicValue + 6, // 54: tfplugin6.PlanResourceChange.Response.requires_replace:type_name -> tfplugin6.AttributePath + 5, // 55: tfplugin6.PlanResourceChange.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 56: tfplugin6.ApplyResourceChange.Request.prior_state:type_name -> tfplugin6.DynamicValue + 4, // 57: tfplugin6.ApplyResourceChange.Request.planned_state:type_name -> tfplugin6.DynamicValue + 4, // 58: tfplugin6.ApplyResourceChange.Request.config:type_name -> tfplugin6.DynamicValue + 4, // 59: tfplugin6.ApplyResourceChange.Request.provider_meta:type_name -> tfplugin6.DynamicValue + 4, // 60: tfplugin6.ApplyResourceChange.Response.new_state:type_name -> tfplugin6.DynamicValue + 5, // 61: tfplugin6.ApplyResourceChange.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 62: tfplugin6.ImportResourceState.ImportedResource.state:type_name -> tfplugin6.DynamicValue + 63, // 63: tfplugin6.ImportResourceState.Response.imported_resources:type_name -> tfplugin6.ImportResourceState.ImportedResource + 5, // 64: tfplugin6.ImportResourceState.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 4, // 65: tfplugin6.ReadDataSource.Request.config:type_name -> tfplugin6.DynamicValue + 4, // 66: tfplugin6.ReadDataSource.Request.provider_meta:type_name -> tfplugin6.DynamicValue + 4, // 67: tfplugin6.ReadDataSource.Response.state:type_name -> tfplugin6.DynamicValue + 5, // 68: tfplugin6.ReadDataSource.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 69, // 69: tfplugin6.GetFunctions.Response.functions:type_name -> tfplugin6.GetFunctions.Response.FunctionsEntry + 5, // 70: 
tfplugin6.GetFunctions.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 10, // 71: tfplugin6.GetFunctions.Response.FunctionsEntry.value:type_name -> tfplugin6.Function + 4, // 72: tfplugin6.CallFunction.Request.arguments:type_name -> tfplugin6.DynamicValue + 4, // 73: tfplugin6.CallFunction.Response.result:type_name -> tfplugin6.DynamicValue + 5, // 74: tfplugin6.CallFunction.Response.diagnostics:type_name -> tfplugin6.Diagnostic + 36, // 75: tfplugin6.Provider.GetMetadata:input_type -> tfplugin6.GetMetadata.Request + 41, // 76: tfplugin6.Provider.GetProviderSchema:input_type -> tfplugin6.GetProviderSchema.Request + 46, // 77: tfplugin6.Provider.ValidateProviderConfig:input_type -> tfplugin6.ValidateProviderConfig.Request + 50, // 78: tfplugin6.Provider.ValidateResourceConfig:input_type -> tfplugin6.ValidateResourceConfig.Request + 52, // 79: tfplugin6.Provider.ValidateDataResourceConfig:input_type -> tfplugin6.ValidateDataResourceConfig.Request + 48, // 80: tfplugin6.Provider.UpgradeResourceState:input_type -> tfplugin6.UpgradeResourceState.Request + 54, // 81: tfplugin6.Provider.ConfigureProvider:input_type -> tfplugin6.ConfigureProvider.Request + 56, // 82: tfplugin6.Provider.ReadResource:input_type -> tfplugin6.ReadResource.Request + 58, // 83: tfplugin6.Provider.PlanResourceChange:input_type -> tfplugin6.PlanResourceChange.Request + 60, // 84: tfplugin6.Provider.ApplyResourceChange:input_type -> tfplugin6.ApplyResourceChange.Request + 62, // 85: tfplugin6.Provider.ImportResourceState:input_type -> tfplugin6.ImportResourceState.Request + 65, // 86: tfplugin6.Provider.ReadDataSource:input_type -> tfplugin6.ReadDataSource.Request + 67, // 87: tfplugin6.Provider.GetFunctions:input_type -> tfplugin6.GetFunctions.Request + 70, // 88: tfplugin6.Provider.CallFunction:input_type -> tfplugin6.CallFunction.Request + 27, // 89: tfplugin6.Provider.StopProvider:input_type -> tfplugin6.StopProvider.Request + 37, // 90: tfplugin6.Provider.GetMetadata:output_type -> 
tfplugin6.GetMetadata.Response + 42, // 91: tfplugin6.Provider.GetProviderSchema:output_type -> tfplugin6.GetProviderSchema.Response + 47, // 92: tfplugin6.Provider.ValidateProviderConfig:output_type -> tfplugin6.ValidateProviderConfig.Response + 51, // 93: tfplugin6.Provider.ValidateResourceConfig:output_type -> tfplugin6.ValidateResourceConfig.Response + 53, // 94: tfplugin6.Provider.ValidateDataResourceConfig:output_type -> tfplugin6.ValidateDataResourceConfig.Response + 49, // 95: tfplugin6.Provider.UpgradeResourceState:output_type -> tfplugin6.UpgradeResourceState.Response + 55, // 96: tfplugin6.Provider.ConfigureProvider:output_type -> tfplugin6.ConfigureProvider.Response + 57, // 97: tfplugin6.Provider.ReadResource:output_type -> tfplugin6.ReadResource.Response + 59, // 98: tfplugin6.Provider.PlanResourceChange:output_type -> tfplugin6.PlanResourceChange.Response + 61, // 99: tfplugin6.Provider.ApplyResourceChange:output_type -> tfplugin6.ApplyResourceChange.Response + 64, // 100: tfplugin6.Provider.ImportResourceState:output_type -> tfplugin6.ImportResourceState.Response + 66, // 101: tfplugin6.Provider.ReadDataSource:output_type -> tfplugin6.ReadDataSource.Response + 68, // 102: tfplugin6.Provider.GetFunctions:output_type -> tfplugin6.GetFunctions.Response + 71, // 103: tfplugin6.Provider.CallFunction:output_type -> tfplugin6.CallFunction.Response + 28, // 104: tfplugin6.Provider.StopProvider:output_type -> tfplugin6.StopProvider.Response + 90, // [90:105] is the sub-list for method output_type + 75, // [75:90] is the sub-list for method input_type + 75, // [75:75] is the sub-list for extension type_name + 75, // [75:75] is the sub-list for extension extendee + 0, // [0:75] is the sub-list for field type_name } func init() { file_tfplugin6_proto_init() } @@ -4044,7 +4789,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ServerCapabilities); i { + switch v := 
v.(*Function); i { case 0: return &v.state case 1: @@ -4056,7 +4801,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetMetadata); i { + switch v := v.(*ServerCapabilities); i { case 0: return &v.state case 1: @@ -4068,7 +4813,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetProviderSchema); i { + switch v := v.(*GetMetadata); i { case 0: return &v.state case 1: @@ -4080,7 +4825,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateProviderConfig); i { + switch v := v.(*GetProviderSchema); i { case 0: return &v.state case 1: @@ -4092,7 +4837,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpgradeResourceState); i { + switch v := v.(*ValidateProviderConfig); i { case 0: return &v.state case 1: @@ -4104,7 +4849,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateResourceConfig); i { + switch v := v.(*UpgradeResourceState); i { case 0: return &v.state case 1: @@ -4116,7 +4861,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ValidateDataResourceConfig); i { + switch v := v.(*ValidateResourceConfig); i { case 0: return &v.state case 1: @@ -4128,7 +4873,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ConfigureProvider); i { + switch v := v.(*ValidateDataResourceConfig); i { case 0: return &v.state case 1: @@ -4140,7 +4885,7 @@ func file_tfplugin6_proto_init() { } } 
file_tfplugin6_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadResource); i { + switch v := v.(*ConfigureProvider); i { case 0: return &v.state case 1: @@ -4152,7 +4897,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PlanResourceChange); i { + switch v := v.(*ReadResource); i { case 0: return &v.state case 1: @@ -4164,7 +4909,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplyResourceChange); i { + switch v := v.(*PlanResourceChange); i { case 0: return &v.state case 1: @@ -4176,7 +4921,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ImportResourceState); i { + switch v := v.(*ApplyResourceChange); i { case 0: return &v.state case 1: @@ -4188,7 +4933,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ReadDataSource); i { + switch v := v.(*ImportResourceState); i { case 0: return &v.state case 1: @@ -4200,7 +4945,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AttributePath_Step); i { + switch v := v.(*ReadDataSource); i { case 0: return &v.state case 1: @@ -4212,7 +4957,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StopProvider_Request); i { + switch v := v.(*GetFunctions); i { case 0: return &v.state case 1: @@ -4224,7 +4969,19 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StopProvider_Response); i { + switch v := v.(*CallFunction); 
i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AttributePath_Step); i { case 0: return &v.state case 1: @@ -4236,7 +4993,7 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Schema_Block); i { + switch v := v.(*StopProvider_Request); i { case 0: return &v.state case 1: @@ -4248,6 +5005,30 @@ func file_tfplugin6_proto_init() { } } file_tfplugin6_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StopProvider_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema_Block); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Schema_Attribute); i { case 0: return &v.state @@ -4259,7 +5040,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Schema_NestedBlock); i { case 0: return &v.state @@ -4271,7 +5052,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Schema_Object); i { case 0: return &v.state @@ -4283,7 +5064,31 @@ func file_tfplugin6_proto_init() { return nil } } - 
file_tfplugin6_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Function_Parameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Function_Return); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetMetadata_Request); i { case 0: return &v.state @@ -4295,7 +5100,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetMetadata_Response); i { case 0: return &v.state @@ -4307,7 +5112,19 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetMetadata_FunctionMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetMetadata_DataSourceMetadata); i { case 0: return &v.state @@ -4319,7 +5136,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetMetadata_ResourceMetadata); i { case 0: return &v.state @@ -4331,7 
+5148,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetProviderSchema_Request); i { case 0: return &v.state @@ -4343,7 +5160,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetProviderSchema_Response); i { case 0: return &v.state @@ -4355,7 +5172,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateProviderConfig_Request); i { case 0: return &v.state @@ -4367,7 +5184,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateProviderConfig_Response); i { case 0: return &v.state @@ -4379,7 +5196,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UpgradeResourceState_Request); i { case 0: return &v.state @@ -4391,7 +5208,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UpgradeResourceState_Response); i { case 0: return &v.state @@ -4403,7 +5220,7 @@ func file_tfplugin6_proto_init() { return nil } } 
- file_tfplugin6_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateResourceConfig_Request); i { case 0: return &v.state @@ -4415,7 +5232,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateResourceConfig_Response); i { case 0: return &v.state @@ -4427,7 +5244,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateDataResourceConfig_Request); i { case 0: return &v.state @@ -4439,7 +5256,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ValidateDataResourceConfig_Response); i { case 0: return &v.state @@ -4451,7 +5268,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ConfigureProvider_Request); i { case 0: return &v.state @@ -4463,7 +5280,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ConfigureProvider_Response); i { case 0: return &v.state @@ -4475,7 +5292,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[45].Exporter = 
func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadResource_Request); i { case 0: return &v.state @@ -4487,7 +5304,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadResource_Response); i { case 0: return &v.state @@ -4499,7 +5316,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PlanResourceChange_Request); i { case 0: return &v.state @@ -4511,7 +5328,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PlanResourceChange_Response); i { case 0: return &v.state @@ -4523,7 +5340,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ApplyResourceChange_Request); i { case 0: return &v.state @@ -4535,7 +5352,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ApplyResourceChange_Response); i { case 0: return &v.state @@ -4547,7 +5364,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + 
file_tfplugin6_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ImportResourceState_Request); i { case 0: return &v.state @@ -4559,7 +5376,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ImportResourceState_ImportedResource); i { case 0: return &v.state @@ -4571,7 +5388,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ImportResourceState_Response); i { case 0: return &v.state @@ -4583,7 +5400,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadDataSource_Request); i { case 0: return &v.state @@ -4595,7 +5412,7 @@ func file_tfplugin6_proto_init() { return nil } } - file_tfplugin6_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + file_tfplugin6_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ReadDataSource_Response); i { case 0: return &v.state @@ -4607,8 +5424,57 @@ func file_tfplugin6_proto_init() { return nil } } + file_tfplugin6_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFunctions_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetFunctions_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CallFunction_Request); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_tfplugin6_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CallFunction_Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } } - file_tfplugin6_proto_msgTypes[19].OneofWrappers = []interface{}{ + file_tfplugin6_proto_msgTypes[1].OneofWrappers = []interface{}{} + file_tfplugin6_proto_msgTypes[22].OneofWrappers = []interface{}{ (*AttributePath_Step_AttributeName)(nil), (*AttributePath_Step_ElementKeyString)(nil), (*AttributePath_Step_ElementKeyInt)(nil), @@ -4619,7 +5485,7 @@ func file_tfplugin6_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_tfplugin6_proto_rawDesc, NumEnums: 4, - NumMessages: 56, + NumMessages: 68, NumExtensions: 0, NumServices: 1, }, diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.proto b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.proto index a20ee8f8255..28fa668ddcc 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.proto +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6.proto @@ -1,9 +1,9 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -// Terraform Plugin RPC protocol version 6.4 +// Terraform Plugin RPC protocol version 6.5 // -// This file defines version 6.4 of the RPC protocol. To implement a plugin +// This file defines version 6.5 of the RPC protocol. 
To implement a plugin // against this protocol, copy this definition into your own codebase and // use protoc to generate stubs for your target language. // @@ -41,6 +41,10 @@ message Diagnostic { string summary = 2; string detail = 3; AttributePath attribute = 4; + + // function_argument is the positional function argument for aligning + // configuration source. + optional int64 function_argument = 5; } message AttributePath { @@ -147,6 +151,62 @@ message Schema { Block block = 2; } +message Function { + // parameters is the ordered list of positional function parameters. + repeated Parameter parameters = 1; + + // variadic_parameter is an optional final parameter which accepts + // zero or more argument values, in which Terraform will send an + // ordered list of the parameter type. + Parameter variadic_parameter = 2; + + // return is the function result. + Return return = 3; + + // summary is the human-readable shortened documentation for the function. + string summary = 4; + + // description is human-readable documentation for the function. + string description = 5; + + // description_kind is the formatting of the description. + StringKind description_kind = 6; + + // deprecation_message is human-readable documentation if the + // function is deprecated. + string deprecation_message = 7; + + message Parameter { + // name is the human-readable display name for the parameter. + string name = 1; + + // type is the type constraint for the parameter. + bytes type = 2; + + // allow_null_value when enabled denotes that a null argument value can + // be passed to the provider. When disabled, Terraform returns an error + // if the argument value is null. + bool allow_null_value = 3; + + // allow_unknown_values when enabled denotes that only wholly known + // argument values will be passed to the provider. When disabled, + // Terraform skips the function call entirely and assumes an unknown + // value result from the function. 
+ bool allow_unknown_values = 4; + + // description is human-readable documentation for the parameter. + string description = 5; + + // description_kind is the formatting of the description. + StringKind description_kind = 6; + } + + message Return { + // type is the type constraint for the function result. + bytes type = 1; + } +} + // ServerCapabilities allows providers to communicate extra information // regarding supported protocol features. This is used to indicate // availability of certain forward-compatible changes which may be optional @@ -192,6 +252,15 @@ service Provider { rpc ReadDataSource(ReadDataSource.Request) returns (ReadDataSource.Response); + // Functions + + // GetFunctions returns the definitions of all functions. + rpc GetFunctions(GetFunctions.Request) returns (GetFunctions.Response); + + // CallFunction runs the provider-defined function logic and returns + // the result with any diagnostics. + rpc CallFunction(CallFunction.Request) returns (CallFunction.Response); + //////// Graceful Shutdown rpc StopProvider(StopProvider.Request) returns (StopProvider.Response); } @@ -205,6 +274,14 @@ message GetMetadata { repeated Diagnostic diagnostics = 2; repeated DataSourceMetadata data_sources = 3; repeated ResourceMetadata resources = 4; + + // functions returns metadata for any functions. + repeated FunctionMetadata functions = 5; + } + + message FunctionMetadata { + // name is the function name. + string name = 1; } message DataSourceMetadata { @@ -226,6 +303,9 @@ message GetProviderSchema { repeated Diagnostic diagnostics = 4; Schema provider_meta = 5; ServerCapabilities server_capabilities = 6; + + // functions is a mapping of function names to definitions. + map functions = 7; } } @@ -415,3 +495,33 @@ message ReadDataSource { repeated Diagnostic diagnostics = 2; } } + +message GetFunctions { + message Request {} + + message Response { + // functions is a mapping of function names to definitions. 
+ map functions = 1; + + // diagnostics is any warnings or errors. + repeated Diagnostic diagnostics = 2; + } +} + +message CallFunction { + message Request { + // name is the name of the function being called. + string name = 1; + + // arguments is the data of each function argument value. + repeated DynamicValue arguments = 2; + } + + message Response { + // result is result value after running the function logic. + DynamicValue result = 1; + + // diagnostics is any warnings or errors from the function logic. + repeated Diagnostic diagnostics = 2; + } +} diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6_grpc.pb.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6_grpc.pb.go index 4e92cbf4d90..53cdde89be9 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6_grpc.pb.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6/tfplugin6_grpc.pb.go @@ -1,9 +1,9 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -// Terraform Plugin RPC protocol version 6.4 +// Terraform Plugin RPC protocol version 6.5 // -// This file defines version 6.4 of the RPC protocol. To implement a plugin +// This file defines version 6.5 of the RPC protocol. To implement a plugin // against this protocol, copy this definition into your own codebase and // use protoc to generate stubs for your target language. 
// @@ -53,6 +53,8 @@ const ( Provider_ApplyResourceChange_FullMethodName = "/tfplugin6.Provider/ApplyResourceChange" Provider_ImportResourceState_FullMethodName = "/tfplugin6.Provider/ImportResourceState" Provider_ReadDataSource_FullMethodName = "/tfplugin6.Provider/ReadDataSource" + Provider_GetFunctions_FullMethodName = "/tfplugin6.Provider/GetFunctions" + Provider_CallFunction_FullMethodName = "/tfplugin6.Provider/CallFunction" Provider_StopProvider_FullMethodName = "/tfplugin6.Provider/StopProvider" ) @@ -81,6 +83,11 @@ type ProviderClient interface { ApplyResourceChange(ctx context.Context, in *ApplyResourceChange_Request, opts ...grpc.CallOption) (*ApplyResourceChange_Response, error) ImportResourceState(ctx context.Context, in *ImportResourceState_Request, opts ...grpc.CallOption) (*ImportResourceState_Response, error) ReadDataSource(ctx context.Context, in *ReadDataSource_Request, opts ...grpc.CallOption) (*ReadDataSource_Response, error) + // GetFunctions returns the definitions of all functions. + GetFunctions(ctx context.Context, in *GetFunctions_Request, opts ...grpc.CallOption) (*GetFunctions_Response, error) + // CallFunction runs the provider-defined function logic and returns + // the result with any diagnostics. + CallFunction(ctx context.Context, in *CallFunction_Request, opts ...grpc.CallOption) (*CallFunction_Response, error) // ////// Graceful Shutdown StopProvider(ctx context.Context, in *StopProvider_Request, opts ...grpc.CallOption) (*StopProvider_Response, error) } @@ -201,6 +208,24 @@ func (c *providerClient) ReadDataSource(ctx context.Context, in *ReadDataSource_ return out, nil } +func (c *providerClient) GetFunctions(ctx context.Context, in *GetFunctions_Request, opts ...grpc.CallOption) (*GetFunctions_Response, error) { + out := new(GetFunctions_Response) + err := c.cc.Invoke(ctx, Provider_GetFunctions_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *providerClient) CallFunction(ctx context.Context, in *CallFunction_Request, opts ...grpc.CallOption) (*CallFunction_Response, error) { + out := new(CallFunction_Response) + err := c.cc.Invoke(ctx, Provider_CallFunction_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *providerClient) StopProvider(ctx context.Context, in *StopProvider_Request, opts ...grpc.CallOption) (*StopProvider_Response, error) { out := new(StopProvider_Response) err := c.cc.Invoke(ctx, Provider_StopProvider_FullMethodName, in, out, opts...) @@ -235,6 +260,11 @@ type ProviderServer interface { ApplyResourceChange(context.Context, *ApplyResourceChange_Request) (*ApplyResourceChange_Response, error) ImportResourceState(context.Context, *ImportResourceState_Request) (*ImportResourceState_Response, error) ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error) + // GetFunctions returns the definitions of all functions. + GetFunctions(context.Context, *GetFunctions_Request) (*GetFunctions_Response, error) + // CallFunction runs the provider-defined function logic and returns + // the result with any diagnostics. 
+ CallFunction(context.Context, *CallFunction_Request) (*CallFunction_Response, error) // ////// Graceful Shutdown StopProvider(context.Context, *StopProvider_Request) (*StopProvider_Response, error) mustEmbedUnimplementedProviderServer() @@ -280,6 +310,12 @@ func (UnimplementedProviderServer) ImportResourceState(context.Context, *ImportR func (UnimplementedProviderServer) ReadDataSource(context.Context, *ReadDataSource_Request) (*ReadDataSource_Response, error) { return nil, status.Errorf(codes.Unimplemented, "method ReadDataSource not implemented") } +func (UnimplementedProviderServer) GetFunctions(context.Context, *GetFunctions_Request) (*GetFunctions_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetFunctions not implemented") +} +func (UnimplementedProviderServer) CallFunction(context.Context, *CallFunction_Request) (*CallFunction_Response, error) { + return nil, status.Errorf(codes.Unimplemented, "method CallFunction not implemented") +} func (UnimplementedProviderServer) StopProvider(context.Context, *StopProvider_Request) (*StopProvider_Response, error) { return nil, status.Errorf(codes.Unimplemented, "method StopProvider not implemented") } @@ -512,6 +548,42 @@ func _Provider_ReadDataSource_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _Provider_GetFunctions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetFunctions_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).GetFunctions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_GetFunctions_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).GetFunctions(ctx, req.(*GetFunctions_Request)) + } + return interceptor(ctx, in, info, handler) +} + 
+func _Provider_CallFunction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CallFunction_Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ProviderServer).CallFunction(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: Provider_CallFunction_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ProviderServer).CallFunction(ctx, req.(*CallFunction_Request)) + } + return interceptor(ctx, in, info, handler) +} + func _Provider_StopProvider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(StopProvider_Request) if err := dec(in); err != nil { @@ -585,6 +657,14 @@ var Provider_ServiceDesc = grpc.ServiceDesc{ MethodName: "ReadDataSource", Handler: _Provider_ReadDataSource_Handler, }, + { + MethodName: "GetFunctions", + Handler: _Provider_GetFunctions_Handler, + }, + { + MethodName: "CallFunction", + Handler: _Provider_CallFunction_Handler, + }, { MethodName: "StopProvider", Handler: _Provider_StopProvider_Handler, diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/diagnostic.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/diagnostic.go index 4144222ce4a..1c334b74bc8 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/diagnostic.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/diagnostic.go @@ -9,9 +9,10 @@ import ( func Diagnostic(in *tfprotov6.Diagnostic) (*tfplugin6.Diagnostic, error) { diag := &tfplugin6.Diagnostic{ - Severity: Diagnostic_Severity(in.Severity), - Summary: forceValidUTF8(in.Summary), - Detail: 
forceValidUTF8(in.Detail), + Severity: Diagnostic_Severity(in.Severity), + Summary: forceValidUTF8(in.Summary), + Detail: forceValidUTF8(in.Detail), + FunctionArgument: in.FunctionArgument, } if in.Attribute != nil { attr, err := AttributePath(in.Attribute) diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/function.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/function.go new file mode 100644 index 00000000000..a646facaba5 --- /dev/null +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/function.go @@ -0,0 +1,174 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package toproto + +import ( + "fmt" + + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/tfplugin6" +) + +func CallFunction_Response(in *tfprotov6.CallFunctionResponse) (*tfplugin6.CallFunction_Response, error) { + if in == nil { + return nil, nil + } + + diags, err := Diagnostics(in.Diagnostics) + + if err != nil { + return nil, err + } + + resp := &tfplugin6.CallFunction_Response{ + Diagnostics: diags, + } + + if in.Result != nil { + resp.Result = DynamicValue(in.Result) + } + + return resp, nil +} + +func Function(in *tfprotov6.Function) (*tfplugin6.Function, error) { + if in == nil { + return nil, nil + } + + resp := &tfplugin6.Function{ + Description: in.Description, + DescriptionKind: StringKind(in.DescriptionKind), + DeprecationMessage: in.DeprecationMessage, + Parameters: make([]*tfplugin6.Function_Parameter, 0, len(in.Parameters)), + Summary: in.Summary, + } + + for position, parameter := range in.Parameters { + if parameter == nil { + return nil, fmt.Errorf("missing function parameter definition at position: %d", position) + } + + functionParameter, err := Function_Parameter(parameter) + + if err != nil { + return nil, fmt.Errorf("unable to marshal 
function parameter at position %d: %w", position, err) + } + + resp.Parameters = append(resp.Parameters, functionParameter) + } + + if in.Return == nil { + return nil, fmt.Errorf("missing function return definition") + } + + functionReturn, err := Function_Return(in.Return) + + if err != nil { + return nil, fmt.Errorf("unable to marshal function return: %w", err) + } + + resp.Return = functionReturn + + if in.VariadicParameter != nil { + variadicParameter, err := Function_Parameter(in.VariadicParameter) + + if err != nil { + return nil, fmt.Errorf("unable to marshal variadic function parameter: %w", err) + } + + resp.VariadicParameter = variadicParameter + } + + return resp, nil +} + +func Function_Parameter(in *tfprotov6.FunctionParameter) (*tfplugin6.Function_Parameter, error) { + if in == nil { + return nil, nil + } + + resp := &tfplugin6.Function_Parameter{ + AllowNullValue: in.AllowNullValue, + AllowUnknownValues: in.AllowUnknownValues, + Description: in.Description, + DescriptionKind: StringKind(in.DescriptionKind), + Name: in.Name, + } + + if in.Type == nil { + return nil, fmt.Errorf("missing function parameter type definition") + } + + ctyType, err := CtyType(in.Type) + + if err != nil { + return resp, fmt.Errorf("error marshaling function parameter type: %w", err) + } + + resp.Type = ctyType + + return resp, nil +} + +func Function_Return(in *tfprotov6.FunctionReturn) (*tfplugin6.Function_Return, error) { + if in == nil { + return nil, nil + } + + resp := &tfplugin6.Function_Return{} + + if in.Type == nil { + return nil, fmt.Errorf("missing function return type definition") + } + + ctyType, err := CtyType(in.Type) + + if err != nil { + return resp, fmt.Errorf("error marshaling function return type: %w", err) + } + + resp.Type = ctyType + + return resp, nil +} + +func GetFunctions_Response(in *tfprotov6.GetFunctionsResponse) (*tfplugin6.GetFunctions_Response, error) { + if in == nil { + return nil, nil + } + + diags, err := Diagnostics(in.Diagnostics) + + 
if err != nil { + return nil, err + } + + resp := &tfplugin6.GetFunctions_Response{ + Diagnostics: diags, + Functions: make(map[string]*tfplugin6.Function, len(in.Functions)), + } + + for name, functionPtr := range in.Functions { + function, err := Function(functionPtr) + + if err != nil { + return nil, fmt.Errorf("error marshaling function definition for %q: %w", name, err) + } + + resp.Functions[name] = function + } + + return resp, nil +} + +func GetMetadata_FunctionMetadata(in *tfprotov6.FunctionMetadata) *tfplugin6.GetMetadata_FunctionMetadata { + if in == nil { + return nil + } + + return &tfplugin6.GetMetadata_FunctionMetadata{ + Name: in.Name, + } +} diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/provider.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/provider.go index 40db1bc90b7..7b33195a7bb 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/provider.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/internal/toproto/provider.go @@ -18,6 +18,7 @@ func GetMetadata_Response(in *tfprotov6.GetMetadataResponse) (*tfplugin6.GetMeta resp := &tfplugin6.GetMetadata_Response{ DataSources: make([]*tfplugin6.GetMetadata_DataSourceMetadata, 0, len(in.DataSources)), + Functions: make([]*tfplugin6.GetMetadata_FunctionMetadata, 0, len(in.Functions)), Resources: make([]*tfplugin6.GetMetadata_ResourceMetadata, 0, len(in.Resources)), ServerCapabilities: ServerCapabilities(in.ServerCapabilities), } @@ -26,6 +27,10 @@ func GetMetadata_Response(in *tfprotov6.GetMetadataResponse) (*tfplugin6.GetMeta resp.DataSources = append(resp.DataSources, GetMetadata_DataSourceMetadata(&datasource)) } + for _, function := range in.Functions { + resp.Functions = append(resp.Functions, GetMetadata_FunctionMetadata(&function)) + } + for _, resource := range in.Resources { resp.Resources = 
append(resp.Resources, GetMetadata_ResourceMetadata(&resource)) } @@ -50,6 +55,9 @@ func GetProviderSchema_Response(in *tfprotov6.GetProviderSchemaResponse) (*tfplu return nil, nil } resp := tfplugin6.GetProviderSchema_Response{ + DataSourceSchemas: make(map[string]*tfplugin6.Schema, len(in.DataSourceSchemas)), + Functions: make(map[string]*tfplugin6.Function, len(in.Functions)), + ResourceSchemas: make(map[string]*tfplugin6.Schema, len(in.ResourceSchemas)), ServerCapabilities: ServerCapabilities(in.ServerCapabilities), } if in.Provider != nil { @@ -66,7 +74,7 @@ func GetProviderSchema_Response(in *tfprotov6.GetProviderSchemaResponse) (*tfplu } resp.ProviderMeta = schema } - resp.ResourceSchemas = make(map[string]*tfplugin6.Schema, len(in.ResourceSchemas)) + for k, v := range in.ResourceSchemas { if v == nil { resp.ResourceSchemas[k] = nil @@ -78,7 +86,7 @@ func GetProviderSchema_Response(in *tfprotov6.GetProviderSchemaResponse) (*tfplu } resp.ResourceSchemas[k] = schema } - resp.DataSourceSchemas = make(map[string]*tfplugin6.Schema, len(in.DataSourceSchemas)) + for k, v := range in.DataSourceSchemas { if v == nil { resp.DataSourceSchemas[k] = nil @@ -90,6 +98,22 @@ func GetProviderSchema_Response(in *tfprotov6.GetProviderSchemaResponse) (*tfplu } resp.DataSourceSchemas[k] = schema } + + for name, functionPtr := range in.Functions { + if functionPtr == nil { + resp.Functions[name] = nil + continue + } + + function, err := Function(functionPtr) + + if err != nil { + return &resp, fmt.Errorf("error marshaling function definition for %q: %w", name, err) + } + + resp.Functions[name] = function + } + diags, err := Diagnostics(in.Diagnostics) if err != nil { return &resp, err diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/provider.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/provider.go index 6f3f9d974d4..e1ea384de3f 100644 --- 
a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/provider.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/provider.go @@ -47,6 +47,16 @@ type ProviderServer interface { // data source is to terraform-plugin-go, so they're their own // interface that is composed into ProviderServer. DataSourceServer + + // FunctionServer is an interface encapsulating all the function-related RPC + // requests. ProviderServer implementations must implement them, but they + // are a handy interface for defining what a function is to + // terraform-plugin-go, so they are their own interface that is composed + // into ProviderServer. + // + // This will be required in an upcoming release. + // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 + // FunctionServer } // GetMetadataRequest represents a GetMetadata RPC request. @@ -66,6 +76,9 @@ type GetMetadataResponse struct { // DataSources returns metadata for all data resources. DataSources []DataSourceMetadata + // Functions returns metadata for all functions. + Functions []FunctionMetadata + // Resources returns metadata for all managed resources. Resources []ResourceMetadata } @@ -106,6 +119,14 @@ type GetProviderSchemaResponse struct { // `data` in a user's configuration. DataSourceSchemas map[string]*Schema + // Functions is a map of function names to their definition. + // + // Unlike data resources and managed resources, the name should NOT be + // prefixed with the provider name and an underscore. Configuration + // references to functions use a separate namespacing syntax that already + // includes the provider name. + Functions map[string]*Function + // Diagnostics report errors or warnings related to returning the // provider's schemas. Returning an empty slice indicates success, with // no errors or warnings generated. 
diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/server.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/server.go index 6b42836e213..ca348d052e9 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/server.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server/server.go @@ -901,3 +901,123 @@ func (s *server) ImportResourceState(ctx context.Context, req *tfplugin6.ImportR } return ret, nil } + +func (s *server) CallFunction(ctx context.Context, protoReq *tfplugin6.CallFunction_Request) (*tfplugin6.CallFunction_Response, error) { + rpc := "CallFunction" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + // Remove this check and error in preference of s.downstream.CallFunction + // below once ProviderServer interface requires FunctionServer. + // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 + functionServer, ok := s.downstream.(tfprotov6.FunctionServer) + + if !ok { + logging.ProtocolError(ctx, "ProviderServer does not implement FunctionServer") + + protoResp := &tfplugin6.CallFunction_Response{ + Diagnostics: []*tfplugin6.Diagnostic{ + { + Severity: tfplugin6.Diagnostic_ERROR, + Summary: "Provider Functions Not Implemented", + Detail: "A provider-defined function call was received by the provider, however the provider does not implement functions. 
" + + "Either upgrade the provider to a version that implements provider-defined functions or this is a bug in Terraform that should be reported to the Terraform maintainers.", + }, + }, + } + + return protoResp, nil + } + + req, err := fromproto.CallFunctionRequest(protoReq) + + if err != nil { + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]any{logging.KeyError: err}) + + return nil, err + } + + for position, argument := range req.Arguments { + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Request", fmt.Sprintf("Arguments_%d", position), argument) + } + + ctx = tf6serverlogging.DownstreamRequest(ctx) + + // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 + // resp, err := s.downstream.CallFunction(ctx, req) + resp, err := functionServer.CallFunction(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]any{logging.KeyError: err}) + return nil, err + } + + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + logging.ProtocolData(ctx, s.protocolDataDir, rpc, "Response", "Result", resp.Result) + + protoResp, err := toproto.CallFunction_Response(resp) + + if err != nil { + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]any{logging.KeyError: err}) + return nil, err + } + + return protoResp, nil +} + +func (s *server) GetFunctions(ctx context.Context, protoReq *tfplugin6.GetFunctions_Request) (*tfplugin6.GetFunctions_Response, error) { + rpc := "GetFunctions" + ctx = s.loggingContext(ctx) + ctx = logging.RpcContext(ctx, rpc) + ctx = s.stoppableContext(ctx) + logging.ProtocolTrace(ctx, "Received request") + defer logging.ProtocolTrace(ctx, "Served request") + + // Remove this check and response in preference of s.downstream.GetFunctions + // below once ProviderServer interface requires FunctionServer. 
+ // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 + functionServer, ok := s.downstream.(tfprotov6.FunctionServer) + + if !ok { + logging.ProtocolWarn(ctx, "ProviderServer does not implement FunctionServer") + + protoResp := &tfplugin6.GetFunctions_Response{ + Functions: map[string]*tfplugin6.Function{}, + } + + return protoResp, nil + } + + req, err := fromproto.GetFunctionsRequest(protoReq) + + if err != nil { + logging.ProtocolError(ctx, "Error converting request from protobuf", map[string]any{logging.KeyError: err}) + + return nil, err + } + + ctx = tf6serverlogging.DownstreamRequest(ctx) + + // Reference: https://github.com/hashicorp/terraform-plugin-go/issues/353 + // resp, err := s.downstream.GetFunctions(ctx, req) + resp, err := functionServer.GetFunctions(ctx, req) + + if err != nil { + logging.ProtocolError(ctx, "Error from downstream", map[string]any{logging.KeyError: err}) + return nil, err + } + + tf6serverlogging.DownstreamResponse(ctx, resp.Diagnostics) + + protoResp, err := toproto.GetFunctions_Response(resp) + + if err != nil { + logging.ProtocolError(ctx, "Error converting response to protobuf", map[string]any{logging.KeyError: err}) + return nil, err + } + + return protoResp, nil +} diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/grpc_provider.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/grpc_provider.go index 287914683f2..4ba774a1f26 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/grpc_provider.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema/grpc_provider.go @@ -80,6 +80,7 @@ func (s *GRPCProviderServer) GetMetadata(ctx context.Context, req *tfprotov5.Get resp := &tfprotov5.GetMetadataResponse{ DataSources: make([]tfprotov5.DataSourceMetadata, 0, len(s.provider.DataSourcesMap)), + Functions: make([]tfprotov5.FunctionMetadata, 0), Resources: 
make([]tfprotov5.ResourceMetadata, 0, len(s.provider.ResourcesMap)), ServerCapabilities: s.serverCapabilities(), } @@ -106,6 +107,7 @@ func (s *GRPCProviderServer) GetProviderSchema(ctx context.Context, req *tfproto resp := &tfprotov5.GetProviderSchemaResponse{ DataSourceSchemas: make(map[string]*tfprotov5.Schema, len(s.provider.DataSourcesMap)), + Functions: make(map[string]*tfprotov5.Function, 0), ResourceSchemas: make(map[string]*tfprotov5.Schema, len(s.provider.ResourcesMap)), ServerCapabilities: s.serverCapabilities(), } @@ -1271,6 +1273,36 @@ func (s *GRPCProviderServer) ReadDataSource(ctx context.Context, req *tfprotov5. return resp, nil } +func (s *GRPCProviderServer) CallFunction(ctx context.Context, req *tfprotov5.CallFunctionRequest) (*tfprotov5.CallFunctionResponse, error) { + ctx = logging.InitContext(ctx) + + logging.HelperSchemaTrace(ctx, "Returning error for provider function call") + + resp := &tfprotov5.CallFunctionResponse{ + Diagnostics: []*tfprotov5.Diagnostic{ + { + Severity: tfprotov5.DiagnosticSeverityError, + Summary: "Function Not Found", + Detail: fmt.Sprintf("No function named %q was found in the provider.", req.Name), + }, + }, + } + + return resp, nil +} + +func (s *GRPCProviderServer) GetFunctions(ctx context.Context, req *tfprotov5.GetFunctionsRequest) (*tfprotov5.GetFunctionsResponse, error) { + ctx = logging.InitContext(ctx) + + logging.HelperSchemaTrace(ctx, "Getting provider functions") + + resp := &tfprotov5.GetFunctionsResponse{ + Functions: make(map[string]*tfprotov5.Function, 0), + } + + return resp, nil +} + func pathToAttributePath(path cty.Path) *tftypes.AttributePath { var steps []tftypes.AttributePathStep diff --git a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go index f477d268e25..627eec58527 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go +++ 
b/.ci/providerlint/vendor/github.com/hashicorp/terraform-plugin-sdk/v2/meta/meta.go @@ -17,7 +17,7 @@ import ( // // Deprecated: Use Go standard library [runtime/debug] package build information // instead. -var SDKVersion = "2.30.0" +var SDKVersion = "2.31.0" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release diff --git a/.ci/providerlint/vendor/github.com/hashicorp/yamux/addr.go b/.ci/providerlint/vendor/github.com/hashicorp/yamux/addr.go index be6ebca9c78..f6a00199cdd 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/yamux/addr.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/yamux/addr.go @@ -54,7 +54,7 @@ func (s *Stream) LocalAddr() net.Addr { return s.session.LocalAddr() } -// LocalAddr returns the remote address +// RemoteAddr returns the remote address func (s *Stream) RemoteAddr() net.Addr { return s.session.RemoteAddr() } diff --git a/.ci/providerlint/vendor/github.com/hashicorp/yamux/const.go b/.ci/providerlint/vendor/github.com/hashicorp/yamux/const.go index 4f52938287f..2fdbf844a8e 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/yamux/const.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/yamux/const.go @@ -5,6 +5,25 @@ import ( "fmt" ) +// NetError implements net.Error +type NetError struct { + err error + timeout bool + temporary bool +} + +func (e *NetError) Error() string { + return e.err.Error() +} + +func (e *NetError) Timeout() bool { + return e.timeout +} + +func (e *NetError) Temporary() bool { + return e.temporary +} + var ( // ErrInvalidVersion means we received a frame with an // invalid version @@ -30,7 +49,13 @@ var ( ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded") // ErrTimeout is used when we reach an IO deadline - ErrTimeout = fmt.Errorf("i/o deadline reached") + ErrTimeout = &NetError{ + err: fmt.Errorf("i/o deadline reached"), + + // Error should meet net.Error interface for timeouts for 
compatability + // with standard library expectations, such as http servers. + timeout: true, + } // ErrStreamClosed is returned when using a closed stream ErrStreamClosed = fmt.Errorf("stream closed") diff --git a/.ci/providerlint/vendor/github.com/hashicorp/yamux/mux.go b/.ci/providerlint/vendor/github.com/hashicorp/yamux/mux.go index 18a078c8ad9..0c3e67b022a 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/yamux/mux.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/yamux/mux.go @@ -31,6 +31,20 @@ type Config struct { // window size that we allow for a stream. MaxStreamWindowSize uint32 + // StreamOpenTimeout is the maximum amount of time that a stream will + // be allowed to remain in pending state while waiting for an ack from the peer. + // Once the timeout is reached the session will be gracefully closed. + // A zero value disables the StreamOpenTimeout allowing unbounded + // blocking on OpenStream calls. + StreamOpenTimeout time.Duration + + // StreamCloseTimeout is the maximum time that a stream will allowed to + // be in a half-closed state when `Close` is called before forcibly + // closing the connection. Forcibly closed connections will empty the + // receive buffer, drop any future packets received for that stream, + // and send a RST to the remote side. + StreamCloseTimeout time.Duration + // LogOutput is used to control the log destination. Either Logger or // LogOutput can be set, not both. 
LogOutput io.Writer @@ -48,6 +62,8 @@ func DefaultConfig() *Config { KeepAliveInterval: 30 * time.Second, ConnectionWriteTimeout: 10 * time.Second, MaxStreamWindowSize: initialStreamWindow, + StreamCloseTimeout: 5 * time.Minute, + StreamOpenTimeout: 75 * time.Second, LogOutput: os.Stderr, } } diff --git a/.ci/providerlint/vendor/github.com/hashicorp/yamux/session.go b/.ci/providerlint/vendor/github.com/hashicorp/yamux/session.go index a80ddec35ea..38fe3ed1f06 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/yamux/session.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/yamux/session.go @@ -2,6 +2,7 @@ package yamux import ( "bufio" + "bytes" "fmt" "io" "io/ioutil" @@ -63,24 +64,27 @@ type Session struct { // sendCh is used to mark a stream as ready to send, // or to send a header out directly. - sendCh chan sendReady + sendCh chan *sendReady // recvDoneCh is closed when recv() exits to avoid a race // between stream registration and stream shutdown recvDoneCh chan struct{} + sendDoneCh chan struct{} // shutdown is used to safely close a session - shutdown bool - shutdownErr error - shutdownCh chan struct{} - shutdownLock sync.Mutex + shutdown bool + shutdownErr error + shutdownCh chan struct{} + shutdownLock sync.Mutex + shutdownErrLock sync.Mutex } // sendReady is used to either mark a stream as ready // or to directly send a header type sendReady struct { Hdr []byte - Body io.Reader + mu sync.Mutex // Protects Body from unsafe reads. 
+ Body []byte Err chan error } @@ -101,8 +105,9 @@ func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session { inflight: make(map[uint32]struct{}), synCh: make(chan struct{}, config.AcceptBacklog), acceptCh: make(chan *Stream, config.AcceptBacklog), - sendCh: make(chan sendReady, 64), + sendCh: make(chan *sendReady, 64), recvDoneCh: make(chan struct{}), + sendDoneCh: make(chan struct{}), shutdownCh: make(chan struct{}), } if client { @@ -184,6 +189,10 @@ GET_ID: s.inflight[id] = struct{}{} s.streamLock.Unlock() + if s.config.StreamOpenTimeout > 0 { + go s.setOpenTimeout(stream) + } + // Send the window update to create if err := stream.sendWindowUpdate(); err != nil { select { @@ -196,6 +205,27 @@ GET_ID: return stream, nil } +// setOpenTimeout implements a timeout for streams that are opened but not established. +// If the StreamOpenTimeout is exceeded we assume the peer is unable to ACK, +// and close the session. +// The number of running timers is bounded by the capacity of the synCh. +func (s *Session) setOpenTimeout(stream *Stream) { + timer := time.NewTimer(s.config.StreamOpenTimeout) + defer timer.Stop() + + select { + case <-stream.establishCh: + return + case <-s.shutdownCh: + return + case <-timer.C: + // Timeout reached while waiting for ACK. + // Close the session to force connection re-establishment. + s.logger.Printf("[ERR] yamux: aborted stream open (destination=%s): %v", s.RemoteAddr().String(), ErrTimeout.err) + s.Close() + } +} + // Accept is used to block until the next available stream // is ready to be accepted. 
func (s *Session) Accept() (net.Conn, error) { @@ -230,10 +260,15 @@ func (s *Session) Close() error { return nil } s.shutdown = true + + s.shutdownErrLock.Lock() if s.shutdownErr == nil { s.shutdownErr = ErrSessionShutdown } + s.shutdownErrLock.Unlock() + close(s.shutdownCh) + s.conn.Close() <-s.recvDoneCh @@ -242,17 +277,18 @@ func (s *Session) Close() error { for _, stream := range s.streams { stream.forceClose() } + <-s.sendDoneCh return nil } // exitErr is used to handle an error that is causing the // session to terminate. func (s *Session) exitErr(err error) { - s.shutdownLock.Lock() + s.shutdownErrLock.Lock() if s.shutdownErr == nil { s.shutdownErr = err } - s.shutdownLock.Unlock() + s.shutdownErrLock.Unlock() s.Close() } @@ -327,7 +363,7 @@ func (s *Session) keepalive() { } // waitForSendErr waits to send a header, checking for a potential shutdown -func (s *Session) waitForSend(hdr header, body io.Reader) error { +func (s *Session) waitForSend(hdr header, body []byte) error { errCh := make(chan error, 1) return s.waitForSendErr(hdr, body, errCh) } @@ -335,7 +371,7 @@ func (s *Session) waitForSend(hdr header, body io.Reader) error { // waitForSendErr waits to send a header with optional data, checking for a // potential shutdown. Since there's the expectation that sends can happen // in a timely manner, we enforce the connection write timeout here. 
-func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error { +func (s *Session) waitForSendErr(hdr header, body []byte, errCh chan error) error { t := timerPool.Get() timer := t.(*time.Timer) timer.Reset(s.config.ConnectionWriteTimeout) @@ -348,7 +384,7 @@ func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) e timerPool.Put(t) }() - ready := sendReady{Hdr: hdr, Body: body, Err: errCh} + ready := &sendReady{Hdr: hdr, Body: body, Err: errCh} select { case s.sendCh <- ready: case <-s.shutdownCh: @@ -357,12 +393,34 @@ func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) e return ErrConnectionWriteTimeout } + bodyCopy := func() { + if body == nil { + return // A nil body is ignored. + } + + // In the event of session shutdown or connection write timeout, + // we need to prevent `send` from reading the body buffer after + // returning from this function since the caller may re-use the + // underlying array. + ready.mu.Lock() + defer ready.mu.Unlock() + + if ready.Body == nil { + return // Body was already copied in `send`. 
+ } + newBody := make([]byte, len(body)) + copy(newBody, body) + ready.Body = newBody + } + select { case err := <-errCh: return err case <-s.shutdownCh: + bodyCopy() return ErrSessionShutdown case <-timer.C: + bodyCopy() return ErrConnectionWriteTimeout } } @@ -384,7 +442,7 @@ func (s *Session) sendNoWait(hdr header) error { }() select { - case s.sendCh <- sendReady{Hdr: hdr}: + case s.sendCh <- &sendReady{Hdr: hdr}: return nil case <-s.shutdownCh: return ErrSessionShutdown @@ -395,39 +453,59 @@ func (s *Session) sendNoWait(hdr header) error { // send is a long running goroutine that sends data func (s *Session) send() { + if err := s.sendLoop(); err != nil { + s.exitErr(err) + } +} + +func (s *Session) sendLoop() error { + defer close(s.sendDoneCh) + var bodyBuf bytes.Buffer for { + bodyBuf.Reset() + select { case ready := <-s.sendCh: // Send a header if ready if ready.Hdr != nil { - sent := 0 - for sent < len(ready.Hdr) { - n, err := s.conn.Write(ready.Hdr[sent:]) - if err != nil { - s.logger.Printf("[ERR] yamux: Failed to write header: %v", err) - asyncSendErr(ready.Err, err) - s.exitErr(err) - return - } - sent += n + _, err := s.conn.Write(ready.Hdr) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write header: %v", err) + asyncSendErr(ready.Err, err) + return err } } - // Send data from a body if given + ready.mu.Lock() if ready.Body != nil { - _, err := io.Copy(s.conn, ready.Body) + // Copy the body into the buffer to avoid + // holding a mutex lock during the write. 
+ _, err := bodyBuf.Write(ready.Body) + if err != nil { + ready.Body = nil + ready.mu.Unlock() + s.logger.Printf("[ERR] yamux: Failed to copy body into buffer: %v", err) + asyncSendErr(ready.Err, err) + return err + } + ready.Body = nil + } + ready.mu.Unlock() + + if bodyBuf.Len() > 0 { + // Send data from a body if given + _, err := s.conn.Write(bodyBuf.Bytes()) if err != nil { s.logger.Printf("[ERR] yamux: Failed to write body: %v", err) asyncSendErr(ready.Err, err) - s.exitErr(err) - return + return err } } // No error, successful send asyncSendErr(ready.Err, nil) case <-s.shutdownCh: - return + return nil } } } @@ -614,8 +692,9 @@ func (s *Session) incomingStream(id uint32) error { // Backlog exceeded! RST the stream s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset") delete(s.streams, id) - stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0) - return s.sendNoWait(stream.sendHdr) + hdr := header(make([]byte, headerSize)) + hdr.encode(typeWindowUpdate, flagRST, id, 0) + return s.sendNoWait(hdr) } } diff --git a/.ci/providerlint/vendor/github.com/hashicorp/yamux/stream.go b/.ci/providerlint/vendor/github.com/hashicorp/yamux/stream.go index aa239197398..23d08fcc8da 100644 --- a/.ci/providerlint/vendor/github.com/hashicorp/yamux/stream.go +++ b/.ci/providerlint/vendor/github.com/hashicorp/yamux/stream.go @@ -2,6 +2,7 @@ package yamux import ( "bytes" + "errors" "io" "sync" "sync/atomic" @@ -49,6 +50,13 @@ type Stream struct { readDeadline atomic.Value // time.Time writeDeadline atomic.Value // time.Time + + // establishCh is notified if the stream is established or being closed. + establishCh chan struct{} + + // closeTimer is set with stateLock held to honor the StreamCloseTimeout + // setting on Session. 
+ closeTimer *time.Timer } // newStream is used to construct a new stream within @@ -66,6 +74,7 @@ func newStream(session *Session, id uint32, state streamState) *Stream { sendWindow: initialStreamWindow, recvNotifyCh: make(chan struct{}, 1), sendNotifyCh: make(chan struct{}, 1), + establishCh: make(chan struct{}, 1), } s.readDeadline.Store(time.Time{}) s.writeDeadline.Store(time.Time{}) @@ -119,6 +128,9 @@ START: // Send a window update potentially err = s.sendWindowUpdate() + if err == ErrSessionShutdown { + err = nil + } return n, err WAIT: @@ -161,7 +173,7 @@ func (s *Stream) Write(b []byte) (n int, err error) { func (s *Stream) write(b []byte) (n int, err error) { var flags uint16 var max uint32 - var body io.Reader + var body []byte START: s.stateLock.Lock() switch s.state { @@ -187,11 +199,15 @@ START: // Send up to our send window max = min(window, uint32(len(b))) - body = bytes.NewReader(b[:max]) + body = b[:max] // Send the header s.sendHdr.encode(typeData, flags, s.id, max) if err = s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil { + if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { + // Message left in ready queue, header re-use is unsafe. + s.sendHdr = header(make([]byte, headerSize)) + } return 0, err } @@ -265,6 +281,10 @@ func (s *Stream) sendWindowUpdate() error { // Send the header s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta) if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { + // Message left in ready queue, header re-use is unsafe. 
+ s.controlHdr = header(make([]byte, headerSize)) + } return err } return nil @@ -279,6 +299,10 @@ func (s *Stream) sendClose() error { flags |= flagFIN s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0) if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + if errors.Is(err, ErrSessionShutdown) || errors.Is(err, ErrConnectionWriteTimeout) { + // Message left in ready queue, header re-use is unsafe. + s.controlHdr = header(make([]byte, headerSize)) + } return err } return nil @@ -312,6 +336,27 @@ func (s *Stream) Close() error { s.stateLock.Unlock() return nil SEND_CLOSE: + // This shouldn't happen (the more realistic scenario to cancel the + // timer is via processFlags) but just in case this ever happens, we + // cancel the timer to prevent dangling timers. + if s.closeTimer != nil { + s.closeTimer.Stop() + s.closeTimer = nil + } + + // If we have a StreamCloseTimeout set we start the timeout timer. + // We do this only if we're not already closing the stream since that + // means this was a graceful close. + // + // This prevents memory leaks if one side (this side) closes and the + // remote side poorly behaves and never responds with a FIN to complete + // the close. After the specified timeout, we clean our resources up no + // matter what. + if !closeStream && s.session.config.StreamCloseTimeout > 0 { + s.closeTimer = time.AfterFunc( + s.session.config.StreamCloseTimeout, s.closeTimeout) + } + s.stateLock.Unlock() s.sendClose() s.notifyWaiting() @@ -321,6 +366,23 @@ SEND_CLOSE: return nil } +// closeTimeout is called after StreamCloseTimeout during a close to +// close this stream. +func (s *Stream) closeTimeout() { + // Close our side forcibly + s.forceClose() + + // Free the stream from the session map + s.session.closeStream(s.id) + + // Send a RST so the remote side closes too. 
+ s.sendLock.Lock() + defer s.sendLock.Unlock() + hdr := header(make([]byte, headerSize)) + hdr.encode(typeWindowUpdate, flagRST, s.id, 0) + s.session.sendNoWait(hdr) +} + // forceClose is used for when the session is exiting func (s *Stream) forceClose() { s.stateLock.Lock() @@ -332,20 +394,27 @@ func (s *Stream) forceClose() { // processFlags is used to update the state of the stream // based on set flags, if any. Lock must be held func (s *Stream) processFlags(flags uint16) error { + s.stateLock.Lock() + defer s.stateLock.Unlock() + // Close the stream without holding the state lock closeStream := false defer func() { if closeStream { + if s.closeTimer != nil { + // Stop our close timeout timer since we gracefully closed + s.closeTimer.Stop() + } + s.session.closeStream(s.id) } }() - s.stateLock.Lock() - defer s.stateLock.Unlock() if flags&flagACK == flagACK { if s.state == streamSYNSent { s.state = streamEstablished } + asyncNotify(s.establishCh) s.session.establishStream(s.id) } if flags&flagFIN == flagFIN { @@ -378,6 +447,7 @@ func (s *Stream) processFlags(flags uint16) error { func (s *Stream) notifyWaiting() { asyncNotify(s.recvNotifyCh) asyncNotify(s.sendNotifyCh) + asyncNotify(s.establishCh) } // incrSendWindow updates the size of our send window @@ -412,6 +482,7 @@ func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error { if length > s.recvWindow { s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, s.recvWindow, length) + s.recvLock.Unlock() return ErrRecvWindowExceeded } @@ -420,14 +491,15 @@ func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error { // This way we can read in the whole packet without further allocations. 
s.recvBuf = bytes.NewBuffer(make([]byte, 0, length)) } - if _, err := io.Copy(s.recvBuf, conn); err != nil { + copiedLength, err := io.Copy(s.recvBuf, conn) + if err != nil { s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err) s.recvLock.Unlock() return err } // Decrement the receive window - s.recvWindow -= length + s.recvWindow -= uint32(copiedLength) s.recvLock.Unlock() // Unblock any readers @@ -446,15 +518,17 @@ func (s *Stream) SetDeadline(t time.Time) error { return nil } -// SetReadDeadline sets the deadline for future Read calls. +// SetReadDeadline sets the deadline for blocked and future Read calls. func (s *Stream) SetReadDeadline(t time.Time) error { s.readDeadline.Store(t) + asyncNotify(s.recvNotifyCh) return nil } -// SetWriteDeadline sets the deadline for future Write calls +// SetWriteDeadline sets the deadline for blocked and future Write calls func (s *Stream) SetWriteDeadline(t time.Time) error { s.writeDeadline.Store(t) + asyncNotify(s.sendNotifyCh) return nil } diff --git a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/CHANGELOG.md b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/CHANGELOG.md index f6b19d5ba40..d45441e6d78 100644 --- a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/CHANGELOG.md +++ b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/CHANGELOG.md @@ -1,6 +1,30 @@ -## [5.3.5](https://github.com/vmihailenco/msgpack/compare/v5.3.4...v5.3.5) (2021-10-22) +## [5.4.1](https://github.com/vmihailenco/msgpack/compare/v5.4.0...v5.4.1) (2023-10-26) + + +### Bug Fixes + +* **reflect:** not assignable to type ([edeaedd](https://github.com/vmihailenco/msgpack/commit/edeaeddb2d51868df8c6ff2d8a218b527aeaf5fd)) + + + +# [5.4.0](https://github.com/vmihailenco/msgpack/compare/v5.3.6...v5.4.0) (2023-10-01) + +## [5.3.6](https://github.com/vmihailenco/msgpack/compare/v5.3.5...v5.3.6) (2023-10-01) + + +### Features + +* allow overwriting time.Time parsing from extID 13 (for NodeJS 
Date) ([9a6b73b](https://github.com/vmihailenco/msgpack/commit/9a6b73b3588fd962d568715f4375e24b089f7066)) +* apply omitEmptyFlag to empty structs ([e5f8d03](https://github.com/vmihailenco/msgpack/commit/e5f8d03c0a1dd9cc571d648cd610305139078de5)) +* support sorted keys for map[string]bool ([690c1fa](https://github.com/vmihailenco/msgpack/commit/690c1fab9814fab4842295ea986111f49850d9a4)) + + + +## [5.3.5](https://github.com/vmihailenco/msgpack/compare/v5.3.4...v5.3.5) (2021-10-22) + +- Allow decoding `nil` code as boolean false. ## v5 diff --git a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/README.md b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/README.md index 66ad98b9c8d..038464f182c 100644 --- a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/README.md +++ b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/README.md @@ -5,19 +5,18 @@ [![Documentation](https://img.shields.io/badge/msgpack-documentation-informational)](https://msgpack.uptrace.dev/) [![Chat](https://discordapp.com/api/guilds/752070105847955518/widget.png)](https://discord.gg/rWtp5Aj) -> :heart: -> [**Uptrace.dev** - All-in-one tool to optimize performance and monitor errors & logs](https://uptrace.dev/?utm_source=gh-msgpack&utm_campaign=gh-msgpack-var2) +> msgpack is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace). +> Uptrace is an [open source APM](https://uptrace.dev/get/open-source-apm.html) and blazingly fast +> [distributed tracing tool](https://get.uptrace.dev/compare/distributed-tracing-tools.html) powered +> by OpenTelemetry and ClickHouse. Give it a star as well! + +## Resources -- Join [Discord](https://discord.gg/rWtp5Aj) to ask questions. 
- [Documentation](https://msgpack.uptrace.dev) +- [Chat](https://discord.gg/rWtp5Aj) - [Reference](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5) - [Examples](https://pkg.go.dev/github.com/vmihailenco/msgpack/v5#pkg-examples) -Other projects you may like: - -- [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite. -- [BunRouter](https://bunrouter.uptrace.dev/) - fast and flexible HTTP router for Go. - ## Features - Primitives, arrays, maps, structs, time.Time and interface{}. @@ -84,3 +83,18 @@ func ExampleMarshal() { // Output: bar } ``` + +## See also + +- [Golang ORM](https://github.com/uptrace/bun) for PostgreSQL, MySQL, MSSQL, and SQLite +- [Golang PostgreSQL](https://bun.uptrace.dev/postgres/) +- [Golang HTTP router](https://github.com/uptrace/bunrouter) +- [Golang ClickHouse ORM](https://github.com/uptrace/go-clickhouse) + +## Contributors + +Thanks to all the people who already contributed! + + + + diff --git a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode.go b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode.go index 5df40e5d9ca..ea645aadb37 100644 --- a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode.go +++ b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode.go @@ -14,14 +14,16 @@ import ( ) const ( - looseInterfaceDecodingFlag uint32 = 1 << iota - disallowUnknownFieldsFlag + bytesAllocLimit = 1 << 20 // 1mb + sliceAllocLimit = 1e6 // 1m elements + maxMapSize = 1e6 // 1m elements ) const ( - bytesAllocLimit = 1e6 // 1mb - sliceAllocLimit = 1e4 - maxMapSize = 1e6 + looseInterfaceDecodingFlag uint32 = 1 << iota + disallowUnknownFieldsFlag + usePreallocateValues + disableAllocLimitFlag ) type bufReader interface { @@ -53,7 +55,7 @@ func PutDecoder(dec *Decoder) { // in the value pointed to by v. 
func Unmarshal(data []byte, v interface{}) error { dec := GetDecoder() - + dec.UsePreallocateValues(true) dec.Reset(bytes.NewReader(data)) err := dec.Decode(v) @@ -64,16 +66,14 @@ func Unmarshal(data []byte, v interface{}) error { // A Decoder reads and decodes MessagePack values from an input stream. type Decoder struct { - r io.Reader - s io.ByteScanner - buf []byte - - rec []byte // accumulates read data if not nil - + r io.Reader + s io.ByteScanner + mapDecoder func(*Decoder) (interface{}, error) + structTag string + buf []byte + rec []byte dict []string flags uint32 - structTag string - mapDecoder func(*Decoder) (interface{}, error) } // NewDecoder returns a new decoder that reads from r. @@ -95,10 +95,9 @@ func (d *Decoder) Reset(r io.Reader) { // ResetDict is like Reset, but also resets the dict. func (d *Decoder) ResetDict(r io.Reader, dict []string) { - d.resetReader(r) + d.ResetReader(r) d.flags = 0 d.structTag = "" - d.mapDecoder = nil d.dict = dict } @@ -110,10 +109,16 @@ func (d *Decoder) WithDict(dict []string, fn func(*Decoder) error) error { return err } -func (d *Decoder) resetReader(r io.Reader) { +func (d *Decoder) ResetReader(r io.Reader) { + d.mapDecoder = nil + d.dict = nil + if br, ok := r.(bufReader); ok { d.r = br d.s = br + } else if r == nil { + d.r = nil + d.s = nil } else { br := bufio.NewReader(r) d.r = br @@ -161,6 +166,24 @@ func (d *Decoder) UseInternedStrings(on bool) { } } +// UsePreallocateValues enables preallocating values in chunks +func (d *Decoder) UsePreallocateValues(on bool) { + if on { + d.flags |= usePreallocateValues + } else { + d.flags &= ^usePreallocateValues + } +} + +// DisableAllocLimit enables fully allocating slices/maps when the size is known +func (d *Decoder) DisableAllocLimit(on bool) { + if on { + d.flags |= disableAllocLimitFlag + } else { + d.flags &= ^disableAllocLimitFlag + } +} + // Buffered returns a reader of the data remaining in the Decoder's buffer. 
// The reader is valid until the next call to Decode. func (d *Decoder) Buffered() io.Reader { @@ -603,7 +626,11 @@ func (d *Decoder) readFull(b []byte) error { func (d *Decoder) readN(n int) ([]byte, error) { var err error - d.buf, err = readN(d.r, d.buf, n) + if d.flags&disableAllocLimitFlag != 0 { + d.buf, err = readN(d.r, d.buf, n) + } else { + d.buf, err = readNGrow(d.r, d.buf, n) + } if err != nil { return nil, err } @@ -615,6 +642,24 @@ func (d *Decoder) readN(n int) ([]byte, error) { } func readN(r io.Reader, b []byte, n int) ([]byte, error) { + if b == nil { + if n == 0 { + return make([]byte, 0), nil + } + b = make([]byte, 0, n) + } + + if n > cap(b) { + b = append(b, make([]byte, n-len(b))...) + } else if n <= cap(b) { + b = b[:n] + } + + _, err := io.ReadFull(r, b) + return b, err +} + +func readNGrow(r io.Reader, b []byte, n int) ([]byte, error) { if b == nil { if n == 0 { return make([]byte, 0), nil diff --git a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode_map.go b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode_map.go index 52e0526cc51..c54dae374fd 100644 --- a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode_map.go +++ b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode_map.go @@ -13,6 +13,8 @@ var errArrayStruct = errors.New("msgpack: number of fields in array-encoded stru var ( mapStringStringPtrType = reflect.TypeOf((*map[string]string)(nil)) mapStringStringType = mapStringStringPtrType.Elem() + mapStringBoolPtrType = reflect.TypeOf((*map[string]bool)(nil)) + mapStringBoolType = mapStringBoolPtrType.Elem() ) var ( @@ -33,7 +35,11 @@ func decodeMapValue(d *Decoder, v reflect.Value) error { } if v.IsNil() { - v.Set(reflect.MakeMap(typ)) + ln := n + if d.flags&disableAllocLimitFlag == 0 { + ln = min(ln, maxMapSize) + } + v.Set(reflect.MakeMapWithSize(typ, ln)) } if n == 0 { return nil @@ -104,7 +110,11 @@ func (d *Decoder) decodeMapStringStringPtr(ptr *map[string]string) error { m 
:= *ptr if m == nil { - *ptr = make(map[string]string, min(size, maxMapSize)) + ln := size + if d.flags&disableAllocLimitFlag == 0 { + ln = min(size, maxMapSize) + } + *ptr = make(map[string]string, ln) m = *ptr } @@ -147,7 +157,7 @@ func (d *Decoder) DecodeMap() (map[string]interface{}, error) { return nil, nil } - m := make(map[string]interface{}, min(n, maxMapSize)) + m := make(map[string]interface{}, n) for i := 0; i < n; i++ { mk, err := d.DecodeString() @@ -174,7 +184,7 @@ func (d *Decoder) DecodeUntypedMap() (map[interface{}]interface{}, error) { return nil, nil } - m := make(map[interface{}]interface{}, min(n, maxMapSize)) + m := make(map[interface{}]interface{}, n) for i := 0; i < n; i++ { mk, err := d.decodeInterfaceCond() @@ -222,7 +232,13 @@ func (d *Decoder) DecodeTypedMap() (interface{}, error) { } mapType := reflect.MapOf(keyType, valueType) - mapValue := reflect.MakeMap(mapType) + + ln := n + if d.flags&disableAllocLimitFlag == 0 { + ln = min(ln, maxMapSize) + } + + mapValue := reflect.MakeMapWithSize(mapType, ln) mapValue.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(value)) n-- @@ -234,17 +250,18 @@ func (d *Decoder) DecodeTypedMap() (interface{}, error) { } func (d *Decoder) decodeTypedMapValue(v reflect.Value, n int) error { - typ := v.Type() - keyType := typ.Key() - valueType := typ.Elem() - + var ( + typ = v.Type() + keyType = typ.Key() + valueType = typ.Elem() + ) for i := 0; i < n; i++ { - mk := reflect.New(keyType).Elem() + mk := d.newValue(keyType).Elem() if err := d.DecodeValue(mk); err != nil { return err } - mv := reflect.New(valueType).Elem() + mv := d.newValue(valueType).Elem() if err := d.DecodeValue(mv); err != nil { return err } diff --git a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode_query.go b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode_query.go index c302ed1f33e..4dce0fe5b97 100644 --- a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode_query.go +++ 
b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode_query.go @@ -11,9 +11,8 @@ import ( type queryResult struct { query string key string + values []interface{} hasAsterisk bool - - values []interface{} } func (q *queryResult) nextKey() { diff --git a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode_slice.go b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode_slice.go index db6f7c5472d..9c155f2ba6b 100644 --- a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode_slice.go +++ b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode_slice.go @@ -49,7 +49,7 @@ func (d *Decoder) decodeStringSlicePtr(ptr *[]string) error { return nil } - ss := makeStrings(*ptr, n) + ss := makeStrings(*ptr, n, d.flags&disableAllocLimitFlag != 0) for i := 0; i < n; i++ { s, err := d.DecodeString() if err != nil { @@ -62,8 +62,8 @@ func (d *Decoder) decodeStringSlicePtr(ptr *[]string) error { return nil } -func makeStrings(s []string, n int) []string { - if n > sliceAllocLimit { +func makeStrings(s []string, n int, noLimit bool) []string { + if !noLimit && n > sliceAllocLimit { n = sliceAllocLimit } @@ -101,10 +101,17 @@ func decodeSliceValue(d *Decoder, v reflect.Value) error { v.Set(v.Slice(0, v.Cap())) } + noLimit := d.flags&disableAllocLimitFlag != 1 + + if noLimit && n > v.Len() { + v.Set(growSliceValue(v, n, noLimit)) + } + for i := 0; i < n; i++ { - if i >= v.Len() { - v.Set(growSliceValue(v, n)) + if !noLimit && i >= v.Len() { + v.Set(growSliceValue(v, n, noLimit)) } + elem := v.Index(i) if err := d.DecodeValue(elem); err != nil { return err @@ -114,9 +121,9 @@ func decodeSliceValue(d *Decoder, v reflect.Value) error { return nil } -func growSliceValue(v reflect.Value, n int) reflect.Value { +func growSliceValue(v reflect.Value, n int, noLimit bool) reflect.Value { diff := n - v.Len() - if diff > sliceAllocLimit { + if !noLimit && diff > sliceAllocLimit { diff = sliceAllocLimit } v = reflect.AppendSlice(v, 
reflect.MakeSlice(v.Type(), diff, diff)) @@ -163,7 +170,7 @@ func (d *Decoder) decodeSlice(c byte) ([]interface{}, error) { return nil, nil } - s := make([]interface{}, 0, min(n, sliceAllocLimit)) + s := make([]interface{}, 0, n) for i := 0; i < n; i++ { v, err := d.decodeInterfaceCond() if err != nil { diff --git a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode_typgen.go b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode_typgen.go new file mode 100644 index 00000000000..0b4c1d04aed --- /dev/null +++ b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode_typgen.go @@ -0,0 +1,46 @@ +package msgpack + +import ( + "reflect" + "sync" +) + +var cachedValues struct { + m map[reflect.Type]chan reflect.Value + sync.RWMutex +} + +func cachedValue(t reflect.Type) reflect.Value { + cachedValues.RLock() + ch := cachedValues.m[t] + cachedValues.RUnlock() + if ch != nil { + return <-ch + } + + cachedValues.Lock() + defer cachedValues.Unlock() + if ch = cachedValues.m[t]; ch != nil { + return <-ch + } + + ch = make(chan reflect.Value, 256) + go func() { + for { + ch <- reflect.New(t) + } + }() + if cachedValues.m == nil { + cachedValues.m = make(map[reflect.Type]chan reflect.Value, 8) + } + cachedValues.m[t] = ch + return <-ch +} + +func (d *Decoder) newValue(t reflect.Type) reflect.Value { + if d.flags&usePreallocateValues == 0 { + return reflect.New(t) + } + + return cachedValue(t) +} diff --git a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode_value.go b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode_value.go index d2ff2aea50f..c44a674e544 100644 --- a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode_value.go +++ b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/decode_value.go @@ -10,6 +10,7 @@ import ( var ( interfaceType = reflect.TypeOf((*interface{})(nil)).Elem() stringType = reflect.TypeOf((*string)(nil)).Elem() + boolType = reflect.TypeOf((*bool)(nil)).Elem() ) var 
valueDecoders []decoderFunc @@ -127,12 +128,12 @@ func ptrValueDecoder(typ reflect.Type) decoderFunc { return func(d *Decoder, v reflect.Value) error { if d.hasNilCode() { if !v.IsNil() { - v.Set(reflect.Zero(v.Type())) + v.Set(d.newValue(typ).Elem()) } return d.DecodeNil() } if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) + v.Set(d.newValue(typ.Elem())) } return decoder(d, v.Elem()) } @@ -154,7 +155,7 @@ func nilAwareDecoder(typ reflect.Type, fn decoderFunc) decoderFunc { return d.decodeNilValue(v) } if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) + v.Set(d.newValue(typ.Elem())) } return fn(d, v) } diff --git a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/encode.go b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/encode.go index 0ef6212e63b..135adc8f37a 100644 --- a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/encode.go +++ b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/encode.go @@ -75,15 +75,12 @@ func Marshal(v interface{}) ([]byte, error) { } type Encoder struct { - w writer - - buf []byte - timeBuf []byte - - dict map[string]int - - flags uint32 + w writer + dict map[string]int structTag string + buf []byte + timeBuf []byte + flags uint32 } // NewEncoder returns a new encoder that writes to w. @@ -107,7 +104,7 @@ func (e *Encoder) Reset(w io.Writer) { // ResetDict is like Reset, but also resets the dict. 
func (e *Encoder) ResetDict(w io.Writer, dict map[string]int) { - e.resetWriter(w) + e.ResetWriter(w) e.flags = 0 e.structTag = "" e.dict = dict @@ -121,9 +118,12 @@ func (e *Encoder) WithDict(dict map[string]int, fn func(*Encoder) error) error { return err } -func (e *Encoder) resetWriter(w io.Writer) { +func (e *Encoder) ResetWriter(w io.Writer) { + e.dict = nil if bw, ok := w.(writer); ok { e.w = bw + } else if w == nil { + e.w = nil } else { e.w = newByteWriter(w) } @@ -132,6 +132,7 @@ func (e *Encoder) resetWriter(w io.Writer) { // SetSortMapKeys causes the Encoder to encode map keys in increasing order. // Supported map types are: // - map[string]string +// - map[string]bool // - map[string]interface{} func (e *Encoder) SetSortMapKeys(on bool) *Encoder { if on { diff --git a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/encode_map.go b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/encode_map.go index ba4c61be72d..a5aa31bb3c8 100644 --- a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/encode_map.go +++ b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/encode_map.go @@ -30,6 +30,32 @@ func encodeMapValue(e *Encoder, v reflect.Value) error { return nil } +func encodeMapStringBoolValue(e *Encoder, v reflect.Value) error { + if v.IsNil() { + return e.EncodeNil() + } + + if err := e.EncodeMapLen(v.Len()); err != nil { + return err + } + + m := v.Convert(mapStringBoolType).Interface().(map[string]bool) + if e.flags&sortMapKeysFlag != 0 { + return e.encodeSortedMapStringBool(m) + } + + for mk, mv := range m { + if err := e.EncodeString(mk); err != nil { + return err + } + if err := e.EncodeBool(mv); err != nil { + return err + } + } + + return nil +} + func encodeMapStringStringValue(e *Encoder, v reflect.Value) error { if v.IsNil() { return e.EncodeNil() @@ -113,6 +139,26 @@ func (e *Encoder) EncodeMapSorted(m map[string]interface{}) error { return nil } +func (e *Encoder) encodeSortedMapStringBool(m map[string]bool) 
error { + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + err := e.EncodeString(k) + if err != nil { + return err + } + if err = e.EncodeBool(m[k]); err != nil { + return err + } + } + + return nil +} + func (e *Encoder) encodeSortedMapStringString(m map[string]string) error { keys := make([]string, 0, len(m)) for k := range m { @@ -148,7 +194,7 @@ func encodeStructValue(e *Encoder, strct reflect.Value) error { if e.flags&arrayEncodedStructsFlag != 0 || structFields.AsArray { return encodeStructValueAsArray(e, strct, structFields.List) } - fields := structFields.OmitEmpty(strct, e.flags&omitEmptyFlag != 0) + fields := structFields.OmitEmpty(e, strct) if err := e.EncodeMapLen(len(fields)); err != nil { return err diff --git a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/encode_value.go b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/encode_value.go index 48cf489fa1f..1d6303a25c9 100644 --- a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/encode_value.go +++ b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/encode_value.go @@ -111,6 +111,8 @@ func _getEncoder(typ reflect.Type) encoderFunc { switch typ.Elem() { case stringType: return encodeMapStringStringValue + case boolType: + return encodeMapStringBoolValue case interfaceType: return encodeMapStringInterfaceValue } @@ -198,6 +200,13 @@ func nilable(kind reflect.Kind) bool { return false } +func nilableType(t reflect.Type) bool { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return nilable(t.Kind()) +} + //------------------------------------------------------------------------------ func marshalBinaryValueAddr(e *Encoder, v reflect.Value) error { diff --git a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/ext.go b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/ext.go index 76e11603d92..354b9d92d79 100644 --- 
a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/ext.go +++ b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/ext.go @@ -96,7 +96,7 @@ func makeExtEncoder( func makeExtEncoderAddr(extEncoder encoderFunc) encoderFunc { return func(e *Encoder, v reflect.Value) error { if !v.CanAddr() { - return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) + return fmt.Errorf("msgpack: EncodeExt(nonaddressable %T)", v.Interface()) } return extEncoder(e, v.Addr()) } @@ -157,7 +157,7 @@ func makeExtDecoder( func makeExtDecoderAddr(extDecoder decoderFunc) decoderFunc { return func(d *Decoder, v reflect.Value) error { if !v.CanAddr() { - return fmt.Errorf("msgpack: Decode(nonaddressable %T)", v.Interface()) + return fmt.Errorf("msgpack: DecodeExt(nonaddressable %T)", v.Interface()) } return extDecoder(d, v.Addr()) } @@ -254,9 +254,9 @@ func (d *Decoder) decodeInterfaceExt(c byte) (interface{}, error) { return nil, fmt.Errorf("msgpack: unknown ext id=%d", extID) } - v := reflect.New(info.Type).Elem() + v := d.newValue(info.Type).Elem() if nilable(v.Kind()) && v.IsNil() { - v.Set(reflect.New(info.Type.Elem())) + v.Set(d.newValue(info.Type.Elem())) } if err := info.Decoder(d, v, extLen); err != nil { diff --git a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/intern.go b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/intern.go index be0316a83d8..7f019aaacc8 100644 --- a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/intern.go +++ b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/intern.go @@ -57,18 +57,16 @@ func encodeInternedStringValue(e *Encoder, v reflect.Value) error { func (e *Encoder) encodeInternedString(s string, intern bool) error { // Interned string takes at least 3 bytes. Plain string 1 byte + string len. 
- if len(s) >= minInternedStringLen { - if idx, ok := e.dict[s]; ok { - return e.encodeInternedStringIndex(idx) - } + if idx, ok := e.dict[s]; ok { + return e.encodeInternedStringIndex(idx) + } - if intern && len(e.dict) < maxDictLen { - if e.dict == nil { - e.dict = make(map[string]int) - } - idx := len(e.dict) - e.dict[s] = idx + if intern && len(s) >= minInternedStringLen && len(e.dict) < maxDictLen { + if e.dict == nil { + e.dict = make(map[string]int) } + idx := len(e.dict) + e.dict[s] = idx } return e.encodeNormalString(s) diff --git a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/msgpack.go b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/msgpack.go index 4db2fa2c71d..4fa000b826f 100644 --- a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/msgpack.go +++ b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/msgpack.go @@ -43,8 +43,8 @@ func (m *RawMessage) DecodeMsgpack(dec *Decoder) error { //------------------------------------------------------------------------------ type unexpectedCodeError struct { - code byte hint string + code byte } func (err unexpectedCodeError) Error() string { diff --git a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/package.json b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/package.json index 298910d45cf..921f8eab225 100644 --- a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/package.json +++ b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/package.json @@ -1,4 +1,4 @@ { "name": "msgpack", - "version": "5.3.5" + "version": "5.4.1" } diff --git a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/time.go b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/time.go index 44566ec0761..1a4ba126522 100644 --- a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/time.go +++ b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/time.go @@ -26,6 +26,11 @@ func timeDecoder(d *Decoder, v reflect.Value, extLen int) error { 
return err } + if tm.IsZero() { + // Zero time does not have timezone information. + tm = tm.UTC() + } + ptr := v.Addr().Interface().(*time.Time) *ptr = tm @@ -103,7 +108,8 @@ func (d *Decoder) DecodeTime() (time.Time, error) { return time.Time{}, err } - if extID != timeExtID { + // NodeJS seems to use extID 13. + if extID != timeExtID && extID != 13 { return time.Time{}, fmt.Errorf("msgpack: invalid time ext id=%d", extID) } diff --git a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/types.go b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/types.go index 69aca611b23..d212e098e7f 100644 --- a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/types.go +++ b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/types.go @@ -66,8 +66,8 @@ type structCache struct { } type structCacheKey struct { - tag string typ reflect.Type + tag string } func newStructCache() *structCache { @@ -90,19 +90,20 @@ func (m *structCache) Fields(typ reflect.Type, tag string) *fields { //------------------------------------------------------------------------------ type field struct { + encoder encoderFunc + decoder decoderFunc name string index []int omitEmpty bool - encoder encoderFunc - decoder decoderFunc } -func (f *field) Omit(strct reflect.Value, forced bool) bool { +func (f *field) Omit(e *Encoder, strct reflect.Value) bool { v, ok := fieldByIndex(strct, f.index) if !ok { return true } - return (f.omitEmpty || forced) && isEmptyValue(v) + forced := e.flags&omitEmptyFlag != 0 + return (f.omitEmpty || forced) && e.isEmptyValue(v) } func (f *field) EncodeValue(e *Encoder, strct reflect.Value) error { @@ -152,7 +153,8 @@ func (fs *fields) warnIfFieldExists(name string) { } } -func (fs *fields) OmitEmpty(strct reflect.Value, forced bool) []*field { +func (fs *fields) OmitEmpty(e *Encoder, strct reflect.Value) []*field { + forced := e.flags&omitEmptyFlag != 0 if !fs.hasOmitEmpty && !forced { return fs.List } @@ -160,7 +162,7 @@ func (fs *fields) 
OmitEmpty(strct reflect.Value, forced bool) []*field { fields := make([]*field, 0, len(fs.List)) for _, f := range fs.List { - if !f.Omit(strct, forced) { + if !f.Omit(e, strct) { fields = append(fields, f) } } @@ -317,7 +319,7 @@ type isZeroer interface { IsZero() bool } -func isEmptyValue(v reflect.Value) bool { +func (e *Encoder) isEmptyValue(v reflect.Value) bool { kind := v.Kind() for kind == reflect.Interface { @@ -335,6 +337,10 @@ func isEmptyValue(v reflect.Value) bool { switch kind { case reflect.Array, reflect.Map, reflect.Slice, reflect.String: return v.Len() == 0 + case reflect.Struct: + structFields := structs.Fields(v.Type(), e.structTag) + fields := structFields.OmitEmpty(e, v) + return len(fields) == 0 case reflect.Bool: return !v.Bool() case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: @@ -399,7 +405,7 @@ func indirectNil(v reflect.Value) (reflect.Value, bool) { if elemType.Kind() != reflect.Struct { return v, false } - v.Set(reflect.New(elemType)) + v.Set(cachedValue(elemType)) } v = v.Elem() } diff --git a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/version.go b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/version.go index 1d49337c359..ca10205f29e 100644 --- a/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/version.go +++ b/.ci/providerlint/vendor/github.com/vmihailenco/msgpack/v5/version.go @@ -2,5 +2,5 @@ package msgpack // Version is the current release version. 
func Version() string { - return "5.3.5" + return "5.4.1" } diff --git a/.ci/providerlint/vendor/golang.org/x/mod/modfile/rule.go b/.ci/providerlint/vendor/golang.org/x/mod/modfile/rule.go index e0869fa3868..35fd1f534cf 100644 --- a/.ci/providerlint/vendor/golang.org/x/mod/modfile/rule.go +++ b/.ci/providerlint/vendor/golang.org/x/mod/modfile/rule.go @@ -542,7 +542,7 @@ func parseReplace(filename string, line *Line, verb string, args []string, fix V if strings.Contains(ns, "@") { return nil, errorf("replacement module must match format 'path version', not 'path@version'") } - return nil, errorf("replacement module without version must be directory path (rooted or starting with ./ or ../)") + return nil, errorf("replacement module without version must be directory path (rooted or starting with . or ..)") } if filepath.Separator == '/' && strings.Contains(ns, `\`) { return nil, errorf("replacement directory appears to be Windows path (on a non-windows system)") @@ -555,7 +555,6 @@ func parseReplace(filename string, line *Line, verb string, args []string, fix V } if IsDirectoryPath(ns) { return nil, errorf("replacement module directory path %q cannot have version", ns) - } } return &Replace{ @@ -679,14 +678,15 @@ func (f *WorkFile) add(errs *ErrorList, line *Line, verb string, args []string, } } -// IsDirectoryPath reports whether the given path should be interpreted -// as a directory path. Just like on the go command line, relative paths +// IsDirectoryPath reports whether the given path should be interpreted as a directory path. +// Just like on the go command line, relative paths starting with a '.' or '..' path component // and rooted paths are directory paths; the rest are module paths. func IsDirectoryPath(ns string) bool { // Because go.mod files can move from one system to another, // we check all known path syntaxes, both Unix and Windows. 
- return strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, "/") || - strings.HasPrefix(ns, `.\`) || strings.HasPrefix(ns, `..\`) || strings.HasPrefix(ns, `\`) || + return ns == "." || strings.HasPrefix(ns, "./") || strings.HasPrefix(ns, `.\`) || + ns == ".." || strings.HasPrefix(ns, "../") || strings.HasPrefix(ns, `..\`) || + strings.HasPrefix(ns, "/") || strings.HasPrefix(ns, `\`) || len(ns) >= 2 && ('A' <= ns[0] && ns[0] <= 'Z' || 'a' <= ns[0] && ns[0] <= 'z') && ns[1] == ':' } diff --git a/.ci/providerlint/vendor/golang.org/x/net/context/context.go b/.ci/providerlint/vendor/golang.org/x/net/context/context.go deleted file mode 100644 index cf66309c4a8..00000000000 --- a/.ci/providerlint/vendor/golang.org/x/net/context/context.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package context defines the Context type, which carries deadlines, -// cancelation signals, and other request-scoped values across API boundaries -// and between processes. -// As of Go 1.7 this package is available in the standard library under the -// name context. https://golang.org/pkg/context. -// -// Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must -// propagate the Context, optionally replacing it with a modified copy created -// using WithDeadline, WithTimeout, WithCancel, or WithValue. -// -// Programs that use Contexts should follow these rules to keep interfaces -// consistent across packages and enable static analysis tools to check context -// propagation: -// -// Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. 
The Context should be the first -// parameter, typically named ctx: -// -// func DoSomething(ctx context.Context, arg Arg) error { -// // ... use ctx ... -// } -// -// Do not pass a nil Context, even if a function permits it. Pass context.TODO -// if you are unsure about which Context to use. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -// -// The same Context may be passed to functions running in different goroutines; -// Contexts are safe for simultaneous use by multiple goroutines. -// -// See http://blog.golang.org/context for example code for a server that uses -// Contexts. -package context // import "golang.org/x/net/context" - -// Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, -// initialization, and tests, and as the top-level Context for incoming -// requests. -func Background() Context { - return background -} - -// TODO returns a non-nil, empty Context. Code should use context.TODO when -// it's unclear which Context to use or it is not yet available (because the -// surrounding function has not yet been extended to accept a Context -// parameter). TODO is recognized by static analysis tools that determine -// whether Contexts are propagated correctly in a program. -func TODO() Context { - return todo -} diff --git a/.ci/providerlint/vendor/golang.org/x/net/context/go17.go b/.ci/providerlint/vendor/golang.org/x/net/context/go17.go deleted file mode 100644 index 2cb9c408f2e..00000000000 --- a/.ci/providerlint/vendor/golang.org/x/net/context/go17.go +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.7 -// +build go1.7 - -package context - -import ( - "context" // standard library's context, as of Go 1.7 - "time" -) - -var ( - todo = context.TODO() - background = context.Background() -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = context.Canceled - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = context.DeadlineExceeded - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - ctx, f := context.WithCancel(parent) - return ctx, f -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - ctx, f := context.WithDeadline(parent, deadline) - return ctx, f -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return context.WithValue(parent, key, val) -} diff --git a/.ci/providerlint/vendor/golang.org/x/net/context/go19.go b/.ci/providerlint/vendor/golang.org/x/net/context/go19.go deleted file mode 100644 index 64d31ecc3ef..00000000000 --- a/.ci/providerlint/vendor/golang.org/x/net/context/go19.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.9 -// +build go1.9 - -package context - -import "context" // standard library's context, as of Go 1.7 - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context = context.Context - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. 
-type CancelFunc = context.CancelFunc diff --git a/.ci/providerlint/vendor/golang.org/x/net/context/pre_go17.go b/.ci/providerlint/vendor/golang.org/x/net/context/pre_go17.go deleted file mode 100644 index 7b6b685114a..00000000000 --- a/.ci/providerlint/vendor/golang.org/x/net/context/pre_go17.go +++ /dev/null @@ -1,301 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 -// +build !go1.7 - -package context - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// An emptyCtx is never canceled, has no values, and has no deadline. It is not -// struct{}, since vars of this type must have distinct addresses. -type emptyCtx int - -func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { - return -} - -func (*emptyCtx) Done() <-chan struct{} { - return nil -} - -func (*emptyCtx) Err() error { - return nil -} - -func (*emptyCtx) Value(key interface{}) interface{} { - return nil -} - -func (e *emptyCtx) String() string { - switch e { - case background: - return "context.Background" - case todo: - return "context.TODO" - } - return "unknown empty Context" -} - -var ( - background = new(emptyCtx) - todo = new(emptyCtx) -) - -// Canceled is the error returned by Context.Err when the context is canceled. -var Canceled = errors.New("context canceled") - -// DeadlineExceeded is the error returned by Context.Err when the context's -// deadline passes. -var DeadlineExceeded = errors.New("context deadline exceeded") - -// WithCancel returns a copy of parent with a new Done channel. The returned -// context's Done channel is closed when the returned cancel function is called -// or when the parent context's Done channel is closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. 
-func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { - c := newCancelCtx(parent) - propagateCancel(parent, c) - return c, func() { c.cancel(true, Canceled) } -} - -// newCancelCtx returns an initialized cancelCtx. -func newCancelCtx(parent Context) *cancelCtx { - return &cancelCtx{ - Context: parent, - done: make(chan struct{}), - } -} - -// propagateCancel arranges for child to be canceled when parent is. -func propagateCancel(parent Context, child canceler) { - if parent.Done() == nil { - return // parent is never canceled - } - if p, ok := parentCancelCtx(parent); ok { - p.mu.Lock() - if p.err != nil { - // parent has already been canceled - child.cancel(false, p.err) - } else { - if p.children == nil { - p.children = make(map[canceler]bool) - } - p.children[child] = true - } - p.mu.Unlock() - } else { - go func() { - select { - case <-parent.Done(): - child.cancel(false, parent.Err()) - case <-child.Done(): - } - }() - } -} - -// parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this -// package represents its parent. -func parentCancelCtx(parent Context) (*cancelCtx, bool) { - for { - switch c := parent.(type) { - case *cancelCtx: - return c, true - case *timerCtx: - return c.cancelCtx, true - case *valueCtx: - parent = c.Context - default: - return nil, false - } - } -} - -// removeChild removes a context from its parent. -func removeChild(parent Context, child canceler) { - p, ok := parentCancelCtx(parent) - if !ok { - return - } - p.mu.Lock() - if p.children != nil { - delete(p.children, child) - } - p.mu.Unlock() -} - -// A canceler is a context type that can be canceled directly. The -// implementations are *cancelCtx and *timerCtx. -type canceler interface { - cancel(removeFromParent bool, err error) - Done() <-chan struct{} -} - -// A cancelCtx can be canceled. When canceled, it also cancels any children -// that implement canceler. 
-type cancelCtx struct { - Context - - done chan struct{} // closed by the first cancel call. - - mu sync.Mutex - children map[canceler]bool // set to nil by the first cancel call - err error // set to non-nil by the first cancel call -} - -func (c *cancelCtx) Done() <-chan struct{} { - return c.done -} - -func (c *cancelCtx) Err() error { - c.mu.Lock() - defer c.mu.Unlock() - return c.err -} - -func (c *cancelCtx) String() string { - return fmt.Sprintf("%v.WithCancel", c.Context) -} - -// cancel closes c.done, cancels each of c's children, and, if -// removeFromParent is true, removes c from its parent's children. -func (c *cancelCtx) cancel(removeFromParent bool, err error) { - if err == nil { - panic("context: internal error: missing cancel error") - } - c.mu.Lock() - if c.err != nil { - c.mu.Unlock() - return // already canceled - } - c.err = err - close(c.done) - for child := range c.children { - // NOTE: acquiring the child's lock while holding parent's lock. - child.cancel(false, err) - } - c.children = nil - c.mu.Unlock() - - if removeFromParent { - removeChild(c.Context, c) - } -} - -// WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned -// context's Done channel is closed when the deadline expires, when the returned -// cancel function is called, or when the parent context's Done channel is -// closed, whichever happens first. -// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete. -func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { - if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { - // The current deadline is already sooner than the new one. 
- return WithCancel(parent) - } - c := &timerCtx{ - cancelCtx: newCancelCtx(parent), - deadline: deadline, - } - propagateCancel(parent, c) - d := deadline.Sub(time.Now()) - if d <= 0 { - c.cancel(true, DeadlineExceeded) // deadline has already passed - return c, func() { c.cancel(true, Canceled) } - } - c.mu.Lock() - defer c.mu.Unlock() - if c.err == nil { - c.timer = time.AfterFunc(d, func() { - c.cancel(true, DeadlineExceeded) - }) - } - return c, func() { c.cancel(true, Canceled) } -} - -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then -// delegating to cancelCtx.cancel. -type timerCtx struct { - *cancelCtx - timer *time.Timer // Under cancelCtx.mu. - - deadline time.Time -} - -func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { - return c.deadline, true -} - -func (c *timerCtx) String() string { - return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) -} - -func (c *timerCtx) cancel(removeFromParent bool, err error) { - c.cancelCtx.cancel(false, err) - if removeFromParent { - // Remove this timerCtx from its parent cancelCtx's children. - removeChild(c.cancelCtx.Context, c) - } - c.mu.Lock() - if c.timer != nil { - c.timer.Stop() - c.timer = nil - } - c.mu.Unlock() -} - -// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
-// -// Canceling this context releases resources associated with it, so code should -// call cancel as soon as the operations running in this Context complete: -// -// func slowOperationWithTimeout(ctx context.Context) (Result, error) { -// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) -// defer cancel() // releases resources if slowOperation completes before timeout elapses -// return slowOperation(ctx) -// } -func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { - return WithDeadline(parent, time.Now().Add(timeout)) -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. -// -// Use context Values only for request-scoped data that transits processes and -// APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key interface{}, val interface{}) Context { - return &valueCtx{parent, key, val} -} - -// A valueCtx carries a key-value pair. It implements Value for that key and -// delegates all other calls to the embedded Context. -type valueCtx struct { - Context - key, val interface{} -} - -func (c *valueCtx) String() string { - return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) -} - -func (c *valueCtx) Value(key interface{}) interface{} { - if c.key == key { - return c.val - } - return c.Context.Value(key) -} diff --git a/.ci/providerlint/vendor/golang.org/x/net/context/pre_go19.go b/.ci/providerlint/vendor/golang.org/x/net/context/pre_go19.go deleted file mode 100644 index 1f9715341fa..00000000000 --- a/.ci/providerlint/vendor/golang.org/x/net/context/pre_go19.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.9 -// +build !go1.9 - -package context - -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. 
-// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. 
- // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. 
-type CancelFunc func() diff --git a/.ci/providerlint/vendor/golang.org/x/net/http2/databuffer.go b/.ci/providerlint/vendor/golang.org/x/net/http2/databuffer.go index a3067f8de74..e6f55cbd163 100644 --- a/.ci/providerlint/vendor/golang.org/x/net/http2/databuffer.go +++ b/.ci/providerlint/vendor/golang.org/x/net/http2/databuffer.go @@ -20,41 +20,44 @@ import ( // TODO: Benchmark to determine if the pools are necessary. The GC may have // improved enough that we can instead allocate chunks like this: // make([]byte, max(16<<10, expectedBytesRemaining)) -var ( - dataChunkSizeClasses = []int{ - 1 << 10, - 2 << 10, - 4 << 10, - 8 << 10, - 16 << 10, - } - dataChunkPools = [...]sync.Pool{ - {New: func() interface{} { return make([]byte, 1<<10) }}, - {New: func() interface{} { return make([]byte, 2<<10) }}, - {New: func() interface{} { return make([]byte, 4<<10) }}, - {New: func() interface{} { return make([]byte, 8<<10) }}, - {New: func() interface{} { return make([]byte, 16<<10) }}, - } -) +var dataChunkPools = [...]sync.Pool{ + {New: func() interface{} { return new([1 << 10]byte) }}, + {New: func() interface{} { return new([2 << 10]byte) }}, + {New: func() interface{} { return new([4 << 10]byte) }}, + {New: func() interface{} { return new([8 << 10]byte) }}, + {New: func() interface{} { return new([16 << 10]byte) }}, +} func getDataBufferChunk(size int64) []byte { - i := 0 - for ; i < len(dataChunkSizeClasses)-1; i++ { - if size <= int64(dataChunkSizeClasses[i]) { - break - } + switch { + case size <= 1<<10: + return dataChunkPools[0].Get().(*[1 << 10]byte)[:] + case size <= 2<<10: + return dataChunkPools[1].Get().(*[2 << 10]byte)[:] + case size <= 4<<10: + return dataChunkPools[2].Get().(*[4 << 10]byte)[:] + case size <= 8<<10: + return dataChunkPools[3].Get().(*[8 << 10]byte)[:] + default: + return dataChunkPools[4].Get().(*[16 << 10]byte)[:] } - return dataChunkPools[i].Get().([]byte) } func putDataBufferChunk(p []byte) { - for i, n := range dataChunkSizeClasses { - 
if len(p) == n { - dataChunkPools[i].Put(p) - return - } + switch len(p) { + case 1 << 10: + dataChunkPools[0].Put((*[1 << 10]byte)(p)) + case 2 << 10: + dataChunkPools[1].Put((*[2 << 10]byte)(p)) + case 4 << 10: + dataChunkPools[2].Put((*[4 << 10]byte)(p)) + case 8 << 10: + dataChunkPools[3].Put((*[8 << 10]byte)(p)) + case 16 << 10: + dataChunkPools[4].Put((*[16 << 10]byte)(p)) + default: + panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) } - panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) } // dataBuffer is an io.ReadWriter backed by a list of data chunks. diff --git a/.ci/providerlint/vendor/golang.org/x/net/http2/go111.go b/.ci/providerlint/vendor/golang.org/x/net/http2/go111.go deleted file mode 100644 index 5bf62b032ec..00000000000 --- a/.ci/providerlint/vendor/golang.org/x/net/http2/go111.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.11 -// +build go1.11 - -package http2 - -import ( - "net/http/httptrace" - "net/textproto" -) - -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { - return trace != nil && trace.WroteHeaderField != nil -} - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { - if trace != nil && trace.WroteHeaderField != nil { - trace.WroteHeaderField(k, []string{v}) - } -} - -func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { - if trace != nil { - return trace.Got1xxResponse - } - return nil -} diff --git a/.ci/providerlint/vendor/golang.org/x/net/http2/go115.go b/.ci/providerlint/vendor/golang.org/x/net/http2/go115.go deleted file mode 100644 index 908af1ab93c..00000000000 --- a/.ci/providerlint/vendor/golang.org/x/net/http2/go115.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.15 -// +build go1.15 - -package http2 - -import ( - "context" - "crypto/tls" -) - -// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS -// connection. -func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { - dialer := &tls.Dialer{ - Config: cfg, - } - cn, err := dialer.DialContext(ctx, network, addr) - if err != nil { - return nil, err - } - tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed - return tlsCn, nil -} diff --git a/.ci/providerlint/vendor/golang.org/x/net/http2/go118.go b/.ci/providerlint/vendor/golang.org/x/net/http2/go118.go deleted file mode 100644 index aca4b2b31ac..00000000000 --- a/.ci/providerlint/vendor/golang.org/x/net/http2/go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.18 -// +build go1.18 - -package http2 - -import ( - "crypto/tls" - "net" -) - -func tlsUnderlyingConn(tc *tls.Conn) net.Conn { - return tc.NetConn() -} diff --git a/.ci/providerlint/vendor/golang.org/x/net/http2/not_go111.go b/.ci/providerlint/vendor/golang.org/x/net/http2/not_go111.go deleted file mode 100644 index cc0baa8197f..00000000000 --- a/.ci/providerlint/vendor/golang.org/x/net/http2/not_go111.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.11 -// +build !go1.11 - -package http2 - -import ( - "net/http/httptrace" - "net/textproto" -) - -func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false } - -func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {} - -func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { - return nil -} diff --git a/.ci/providerlint/vendor/golang.org/x/net/http2/not_go115.go b/.ci/providerlint/vendor/golang.org/x/net/http2/not_go115.go deleted file mode 100644 index e6c04cf7ac7..00000000000 --- a/.ci/providerlint/vendor/golang.org/x/net/http2/not_go115.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.15 -// +build !go1.15 - -package http2 - -import ( - "context" - "crypto/tls" -) - -// dialTLSWithContext opens a TLS connection. -func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { - cn, err := tls.Dial(network, addr, cfg) - if err != nil { - return nil, err - } - if err := cn.Handshake(); err != nil { - return nil, err - } - if cfg.InsecureSkipVerify { - return cn, nil - } - if err := cn.VerifyHostname(cfg.ServerName); err != nil { - return nil, err - } - return cn, nil -} diff --git a/.ci/providerlint/vendor/golang.org/x/net/http2/not_go118.go b/.ci/providerlint/vendor/golang.org/x/net/http2/not_go118.go deleted file mode 100644 index eab532c96bc..00000000000 --- a/.ci/providerlint/vendor/golang.org/x/net/http2/not_go118.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.18 -// +build !go1.18 - -package http2 - -import ( - "crypto/tls" - "net" -) - -func tlsUnderlyingConn(tc *tls.Conn) net.Conn { - return nil -} diff --git a/.ci/providerlint/vendor/golang.org/x/net/http2/server.go b/.ci/providerlint/vendor/golang.org/x/net/http2/server.go index 02c88b6b3e1..ae94c6408d5 100644 --- a/.ci/providerlint/vendor/golang.org/x/net/http2/server.go +++ b/.ci/providerlint/vendor/golang.org/x/net/http2/server.go @@ -2549,7 +2549,6 @@ type responseWriterState struct { wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. sentHeader bool // have we sent the header frame? handlerDone bool // handler has finished - dirty bool // a Write failed; don't reuse this responseWriterState sentContentLen int64 // non-zero if handler set a Content-Length header wroteBytes int64 @@ -2669,7 +2668,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { date: date, }) if err != nil { - rws.dirty = true return 0, err } if endStream { @@ -2690,7 +2688,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { if len(p) > 0 || endStream { // only send a 0 byte DATA frame if we're ending the stream. 
if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { - rws.dirty = true return 0, err } } @@ -2702,9 +2699,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { trailers: rws.trailers, endStream: true, }) - if err != nil { - rws.dirty = true - } return len(p), err } return len(p), nil @@ -2920,14 +2914,12 @@ func (rws *responseWriterState) writeHeader(code int) { h.Del("Transfer-Encoding") } - if rws.conn.writeHeaders(rws.stream, &writeResHeaders{ + rws.conn.writeHeaders(rws.stream, &writeResHeaders{ streamID: rws.stream.id, httpResCode: code, h: h, endStream: rws.handlerDone && !rws.hasTrailers(), - }) != nil { - rws.dirty = true - } + }) return } @@ -2992,19 +2984,10 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, func (w *responseWriter) handlerDone() { rws := w.rws - dirty := rws.dirty rws.handlerDone = true w.Flush() w.rws = nil - if !dirty { - // Only recycle the pool if all prior Write calls to - // the serverConn goroutine completed successfully. If - // they returned earlier due to resets from the peer - // there might still be write goroutines outstanding - // from the serverConn referencing the rws memory. See - // issue 20704. - responseWriterStatePool.Put(rws) - } + responseWriterStatePool.Put(rws) } // Push errors. 
@@ -3187,6 +3170,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) { panic(fmt.Sprintf("newWriterAndRequestNoBody(%+v): %v", msg.url, err)) } + sc.curHandlers++ go sc.runHandler(rw, req, sc.handler.ServeHTTP) return promisedID, nil } diff --git a/.ci/providerlint/vendor/golang.org/x/net/http2/transport.go b/.ci/providerlint/vendor/golang.org/x/net/http2/transport.go index 4515b22c4a1..df578b86c65 100644 --- a/.ci/providerlint/vendor/golang.org/x/net/http2/transport.go +++ b/.ci/providerlint/vendor/golang.org/x/net/http2/transport.go @@ -1018,7 +1018,7 @@ func (cc *ClientConn) forceCloseConn() { if !ok { return } - if nc := tlsUnderlyingConn(tc); nc != nil { + if nc := tc.NetConn(); nc != nil { nc.Close() } } @@ -3201,3 +3201,34 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) { trace.GotFirstResponseByte() } } + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { + return trace != nil && trace.WroteHeaderField != nil +} + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(k, []string{v}) + } +} + +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + if trace != nil { + return trace.Got1xxResponse + } + return nil +} + +// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS +// connection. 
+func (t *Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { + dialer := &tls.Dialer{ + Config: cfg, + } + cn, err := dialer.DialContext(ctx, network, addr) + if err != nil { + return nil, err + } + tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed + return tlsCn, nil +} diff --git a/.ci/providerlint/vendor/golang.org/x/net/idna/go118.go b/.ci/providerlint/vendor/golang.org/x/net/idna/go118.go index c5c4338dbed..712f1ad839f 100644 --- a/.ci/providerlint/vendor/golang.org/x/net/idna/go118.go +++ b/.ci/providerlint/vendor/golang.org/x/net/idna/go118.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build go1.18 -// +build go1.18 package idna diff --git a/.ci/providerlint/vendor/golang.org/x/net/idna/idna10.0.0.go b/.ci/providerlint/vendor/golang.org/x/net/idna/idna10.0.0.go index 64ccf85febb..7b371788473 100644 --- a/.ci/providerlint/vendor/golang.org/x/net/idna/idna10.0.0.go +++ b/.ci/providerlint/vendor/golang.org/x/net/idna/idna10.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build go1.10 -// +build go1.10 // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to diff --git a/.ci/providerlint/vendor/golang.org/x/net/idna/idna9.0.0.go b/.ci/providerlint/vendor/golang.org/x/net/idna/idna9.0.0.go index ee1698cefbd..cc6a892a4a3 100644 --- a/.ci/providerlint/vendor/golang.org/x/net/idna/idna9.0.0.go +++ b/.ci/providerlint/vendor/golang.org/x/net/idna/idna9.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. 
//go:build !go1.10 -// +build !go1.10 // Package idna implements IDNA2008 using the compatibility processing // defined by UTS (Unicode Technical Standard) #46, which defines a standard to diff --git a/.ci/providerlint/vendor/golang.org/x/net/idna/pre_go118.go b/.ci/providerlint/vendor/golang.org/x/net/idna/pre_go118.go index 3aaccab1c5a..40e74bb3d2a 100644 --- a/.ci/providerlint/vendor/golang.org/x/net/idna/pre_go118.go +++ b/.ci/providerlint/vendor/golang.org/x/net/idna/pre_go118.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build !go1.18 -// +build !go1.18 package idna diff --git a/.ci/providerlint/vendor/golang.org/x/net/idna/tables10.0.0.go b/.ci/providerlint/vendor/golang.org/x/net/idna/tables10.0.0.go index d1d62ef459b..c6c2bf10a60 100644 --- a/.ci/providerlint/vendor/golang.org/x/net/idna/tables10.0.0.go +++ b/.ci/providerlint/vendor/golang.org/x/net/idna/tables10.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.10 && !go1.13 -// +build go1.10,!go1.13 package idna diff --git a/.ci/providerlint/vendor/golang.org/x/net/idna/tables11.0.0.go b/.ci/providerlint/vendor/golang.org/x/net/idna/tables11.0.0.go index 167efba7125..76789393cc0 100644 --- a/.ci/providerlint/vendor/golang.org/x/net/idna/tables11.0.0.go +++ b/.ci/providerlint/vendor/golang.org/x/net/idna/tables11.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.13 && !go1.14 -// +build go1.13,!go1.14 package idna diff --git a/.ci/providerlint/vendor/golang.org/x/net/idna/tables12.0.0.go b/.ci/providerlint/vendor/golang.org/x/net/idna/tables12.0.0.go index ab40f7bcc3b..0600cd2ae54 100644 --- a/.ci/providerlint/vendor/golang.org/x/net/idna/tables12.0.0.go +++ b/.ci/providerlint/vendor/golang.org/x/net/idna/tables12.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
//go:build go1.14 && !go1.16 -// +build go1.14,!go1.16 package idna diff --git a/.ci/providerlint/vendor/golang.org/x/net/idna/tables13.0.0.go b/.ci/providerlint/vendor/golang.org/x/net/idna/tables13.0.0.go index 66701eadfb3..2fb768ef6d9 100644 --- a/.ci/providerlint/vendor/golang.org/x/net/idna/tables13.0.0.go +++ b/.ci/providerlint/vendor/golang.org/x/net/idna/tables13.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.16 && !go1.21 -// +build go1.16,!go1.21 package idna diff --git a/.ci/providerlint/vendor/golang.org/x/net/idna/tables15.0.0.go b/.ci/providerlint/vendor/golang.org/x/net/idna/tables15.0.0.go index 40033778f01..5ff05fe1afc 100644 --- a/.ci/providerlint/vendor/golang.org/x/net/idna/tables15.0.0.go +++ b/.ci/providerlint/vendor/golang.org/x/net/idna/tables15.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build go1.21 -// +build go1.21 package idna diff --git a/.ci/providerlint/vendor/golang.org/x/net/idna/tables9.0.0.go b/.ci/providerlint/vendor/golang.org/x/net/idna/tables9.0.0.go index 4074b5332e3..0f25e84ca20 100644 --- a/.ci/providerlint/vendor/golang.org/x/net/idna/tables9.0.0.go +++ b/.ci/providerlint/vendor/golang.org/x/net/idna/tables9.0.0.go @@ -1,7 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. //go:build !go1.10 -// +build !go1.10 package idna diff --git a/.ci/providerlint/vendor/golang.org/x/net/idna/trie12.0.0.go b/.ci/providerlint/vendor/golang.org/x/net/idna/trie12.0.0.go index bb63f904b37..8a75b966733 100644 --- a/.ci/providerlint/vendor/golang.org/x/net/idna/trie12.0.0.go +++ b/.ci/providerlint/vendor/golang.org/x/net/idna/trie12.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. 
//go:build !go1.16 -// +build !go1.16 package idna diff --git a/.ci/providerlint/vendor/golang.org/x/net/idna/trie13.0.0.go b/.ci/providerlint/vendor/golang.org/x/net/idna/trie13.0.0.go index 7d68a8dc13c..fa45bb90745 100644 --- a/.ci/providerlint/vendor/golang.org/x/net/idna/trie13.0.0.go +++ b/.ci/providerlint/vendor/golang.org/x/net/idna/trie13.0.0.go @@ -5,7 +5,6 @@ // license that can be found in the LICENSE file. //go:build go1.16 -// +build go1.16 package idna diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/fcntl.go b/.ci/providerlint/vendor/golang.org/x/sys/unix/fcntl.go index 58c6bfc70f6..6200876fb28 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/fcntl.go +++ b/.ci/providerlint/vendor/golang.org/x/sys/unix/fcntl.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build dragonfly || freebsd || linux || netbsd || openbsd +//go:build dragonfly || freebsd || linux || netbsd package unix diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/ioctl_linux.go b/.ci/providerlint/vendor/golang.org/x/sys/unix/ioctl_linux.go index 0d12c0851ad..dbe680eab88 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/ioctl_linux.go +++ b/.ci/providerlint/vendor/golang.org/x/sys/unix/ioctl_linux.go @@ -231,3 +231,8 @@ func IoctlLoopGetStatus64(fd int) (*LoopInfo64, error) { func IoctlLoopSetStatus64(fd int, value *LoopInfo64) error { return ioctlPtr(fd, LOOP_SET_STATUS64, unsafe.Pointer(value)) } + +// IoctlLoopConfigure configures all loop device parameters in a single step +func IoctlLoopConfigure(fd int, value *LoopConfig) error { + return ioctlPtr(fd, LOOP_CONFIGURE, unsafe.Pointer(value)) +} diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/mkerrors.sh b/.ci/providerlint/vendor/golang.org/x/sys/unix/mkerrors.sh index cbe24150a7a..6202638bae8 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/mkerrors.sh +++ 
b/.ci/providerlint/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -519,6 +519,7 @@ ccflags="$@" $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || $2 ~ /^LO_(KEY|NAME)_SIZE$/ || $2 ~ /^LOOP_(CLR|CTL|GET|SET)_/ || + $2 == "LOOP_CONFIGURE" || $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ || $2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ || $2 ~ /^NFC_.*_(MAX)?SIZE$/ || @@ -560,7 +561,7 @@ ccflags="$@" $2 ~ /^RLIMIT_(AS|CORE|CPU|DATA|FSIZE|LOCKS|MEMLOCK|MSGQUEUE|NICE|NOFILE|NPROC|RSS|RTPRIO|RTTIME|SIGPENDING|STACK)|RLIM_INFINITY/ || $2 ~ /^PRIO_(PROCESS|PGRP|USER)/ || $2 ~ /^CLONE_[A-Z_]+/ || - $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+)$/ && + $2 !~ /^(BPF_TIMEVAL|BPF_FIB_LOOKUP_[A-Z]+|BPF_F_LINK)$/ && $2 ~ /^(BPF|DLT)_/ || $2 ~ /^AUDIT_/ || $2 ~ /^(CLOCK|TIMER)_/ || diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/syscall_bsd.go b/.ci/providerlint/vendor/golang.org/x/sys/unix/syscall_bsd.go index 6f328e3a554..a00c3e5450b 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/.ci/providerlint/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -316,7 +316,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { if err != nil { return "", err } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } //sys recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Socklen) (n int, err error) diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/syscall_linux.go b/.ci/providerlint/vendor/golang.org/x/sys/unix/syscall_linux.go index a5e1c10e341..0f85e29e621 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/.ci/providerlint/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -61,15 +61,23 @@ func FanotifyMark(fd int, flags uint, mask uint64, dirFd int, pathname string) ( } //sys fchmodat(dirfd int, path string, mode uint32) (err error) - -func Fchmodat(dirfd int, path string, 
mode uint32, flags int) (err error) { - // Linux fchmodat doesn't support the flags parameter. Mimick glibc's behavior - // and check the flags. Otherwise the mode would be applied to the symlink - // destination which is not what the user expects. - if flags&^AT_SYMLINK_NOFOLLOW != 0 { - return EINVAL - } else if flags&AT_SYMLINK_NOFOLLOW != 0 { - return EOPNOTSUPP +//sys fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) + +func Fchmodat(dirfd int, path string, mode uint32, flags int) error { + // Linux fchmodat doesn't support the flags parameter, but fchmodat2 does. + // Try fchmodat2 if flags are specified. + if flags != 0 { + err := fchmodat2(dirfd, path, mode, flags) + if err == ENOSYS { + // fchmodat2 isn't available. If the flags are known to be valid, + // return EOPNOTSUPP to indicate that fchmodat doesn't support them. + if flags&^(AT_SYMLINK_NOFOLLOW|AT_EMPTY_PATH) != 0 { + return EINVAL + } else if flags&(AT_SYMLINK_NOFOLLOW|AT_EMPTY_PATH) != 0 { + return EOPNOTSUPP + } + } + return err } return fchmodat(dirfd, path, mode) } @@ -1302,7 +1310,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { return "", err } } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } func GetsockoptTpacketStats(fd, level, opt int) (*TpacketStats, error) { diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/.ci/providerlint/vendor/golang.org/x/sys/unix/syscall_openbsd.go index d2882ee04f7..b25343c71a4 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/.ci/providerlint/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -166,6 +166,20 @@ func Getresgid() (rgid, egid, sgid int) { //sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL +//sys fcntl(fd int, cmd int, arg int) (n int, err error) +//sys fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) = SYS_FCNTL + +// FcntlInt performs a 
fcntl syscall on fd with the provided command and argument. +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + return fcntl(int(fd), cmd, arg) +} + +// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. +func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { + _, err := fcntlPtr(int(fd), cmd, unsafe.Pointer(lk)) + return err +} + //sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/syscall_solaris.go b/.ci/providerlint/vendor/golang.org/x/sys/unix/syscall_solaris.go index 60c8142d49e..21974af064d 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/.ci/providerlint/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -158,7 +158,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { if err != nil { return "", err } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } const ImplementsGetwd = true diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/.ci/providerlint/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go index d99d05f1bc1..b473038c615 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go +++ b/.ci/providerlint/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go @@ -1104,7 +1104,7 @@ func GetsockoptString(fd, level, opt int) (string, error) { return "", err } - return string(buf[:vallen-1]), nil + return ByteSliceToString(buf[:vallen]), nil } func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/zerrors_linux.go b/.ci/providerlint/vendor/golang.org/x/sys/unix/zerrors_linux.go index 9c00cbf512c..c73cfe2f10b 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ 
b/.ci/providerlint/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -486,7 +486,6 @@ const ( BPF_F_ANY_ALIGNMENT = 0x2 BPF_F_BEFORE = 0x8 BPF_F_ID = 0x20 - BPF_F_LINK = 0x2000 BPF_F_NETFILTER_IP_DEFRAG = 0x1 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REPLACE = 0x4 @@ -1802,6 +1801,7 @@ const ( LOCK_SH = 0x1 LOCK_UN = 0x8 LOOP_CLR_FD = 0x4c01 + LOOP_CONFIGURE = 0x4c0a LOOP_CTL_ADD = 0x4c80 LOOP_CTL_GET_FREE = 0x4c82 LOOP_CTL_REMOVE = 0x4c81 diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_linux.go index faca7a557b1..1488d27128c 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -37,6 +37,21 @@ func fchmodat(dirfd int, path string, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fchmodat2(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT2, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ioctl(fd int, req uint, arg uintptr) (err error) { _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) if e1 != 0 { diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 88bfc288578..a1d061597cc 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -584,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := 
syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s index 4cbeff171b2..41b5617316c 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s +++ b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $4 DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fcntl_trampoline_addr(SB)/4, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index b8a67b99af8..5b2a7409778 100644 --- 
a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -584,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s index 1123f27571e..4019a656f6d 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s +++ b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT 
libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index af50a65c0cd..f6eda1344a8 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -584,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s index 82badae39fe..ac4af24f908 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s +++ b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL 
·libc_sysctl_trampoline_addr(SB), RODATA, $4 DATA ·libc_sysctl_trampoline_addr(SB)/4, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $4 +DATA ·libc_fcntl_trampoline_addr(SB)/4, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $4 diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 8fb4ff36a7d..55df20ae9d8 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -584,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s 
b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s index 24d7eecb93b..f77d532121b 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s +++ b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go index f469a83ee6e..8c1155cbc08 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go +++ b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.go @@ -584,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, 
sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s index 9a498a06773..fae140b62c9 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s +++ b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_mips64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go index c26ca2e1aa2..7cc80c58d98 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go +++ b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.go @@ -584,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) 
(n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s index 1f224aa4162..9d1e0ff06d0 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s +++ b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_ppc64.s @@ -213,6 +213,12 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + CALL libc_fcntl(SB) + RET +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 CALL libc_ppoll(SB) RET diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go index bcc920dd259..0688737f494 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go +++ b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.go @@ -584,6 +584,32 @@ var libc_sysctl_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func fcntl(fd int, cmd int, arg int) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = 
int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_fcntl_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_fcntl fcntl "libc.so" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func fcntlPtr(fd int, cmd int, arg unsafe.Pointer) (n int, err error) { + r0, _, e1 := syscall_syscall(libc_fcntl_trampoline_addr, uintptr(fd), uintptr(cmd), uintptr(arg)) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error) { r0, _, e1 := syscall_syscall6(libc_ppoll_trampoline_addr, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)), 0, 0) n = int(r0) diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s index 87a79c7095a..da115f9a4b6 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s +++ b/.ci/providerlint/vendor/golang.org/x/sys/unix/zsyscall_openbsd_riscv64.s @@ -178,6 +178,11 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8 DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB) +TEXT libc_fcntl_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_fcntl(SB) +GLOBL ·libc_fcntl_trampoline_addr(SB), RODATA, $8 +DATA ·libc_fcntl_trampoline_addr(SB)/8, $libc_fcntl_trampoline<>(SB) + TEXT libc_ppoll_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_ppoll(SB) GLOBL ·libc_ppoll_trampoline_addr(SB), RODATA, $8 diff --git a/.ci/providerlint/vendor/golang.org/x/sys/unix/ztypes_linux.go b/.ci/providerlint/vendor/golang.org/x/sys/unix/ztypes_linux.go index 997bcd55ae9..bbf8399ff58 100644 --- a/.ci/providerlint/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/.ci/providerlint/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -2671,6 
+2671,7 @@ const ( BPF_PROG_TYPE_LSM = 0x1d BPF_PROG_TYPE_SK_LOOKUP = 0x1e BPF_PROG_TYPE_SYSCALL = 0x1f + BPF_PROG_TYPE_NETFILTER = 0x20 BPF_CGROUP_INET_INGRESS = 0x0 BPF_CGROUP_INET_EGRESS = 0x1 BPF_CGROUP_INET_SOCK_CREATE = 0x2 @@ -2715,6 +2716,11 @@ const ( BPF_PERF_EVENT = 0x29 BPF_TRACE_KPROBE_MULTI = 0x2a BPF_LSM_CGROUP = 0x2b + BPF_STRUCT_OPS = 0x2c + BPF_NETFILTER = 0x2d + BPF_TCX_INGRESS = 0x2e + BPF_TCX_EGRESS = 0x2f + BPF_TRACE_UPROBE_MULTI = 0x30 BPF_LINK_TYPE_UNSPEC = 0x0 BPF_LINK_TYPE_RAW_TRACEPOINT = 0x1 BPF_LINK_TYPE_TRACING = 0x2 @@ -2725,6 +2731,18 @@ const ( BPF_LINK_TYPE_PERF_EVENT = 0x7 BPF_LINK_TYPE_KPROBE_MULTI = 0x8 BPF_LINK_TYPE_STRUCT_OPS = 0x9 + BPF_LINK_TYPE_NETFILTER = 0xa + BPF_LINK_TYPE_TCX = 0xb + BPF_LINK_TYPE_UPROBE_MULTI = 0xc + BPF_PERF_EVENT_UNSPEC = 0x0 + BPF_PERF_EVENT_UPROBE = 0x1 + BPF_PERF_EVENT_URETPROBE = 0x2 + BPF_PERF_EVENT_KPROBE = 0x3 + BPF_PERF_EVENT_KRETPROBE = 0x4 + BPF_PERF_EVENT_TRACEPOINT = 0x5 + BPF_PERF_EVENT_EVENT = 0x6 + BPF_F_KPROBE_MULTI_RETURN = 0x1 + BPF_F_UPROBE_MULTI_RETURN = 0x1 BPF_ANY = 0x0 BPF_NOEXIST = 0x1 BPF_EXIST = 0x2 @@ -2742,6 +2760,8 @@ const ( BPF_F_MMAPABLE = 0x400 BPF_F_PRESERVE_ELEMS = 0x800 BPF_F_INNER_MAP = 0x1000 + BPF_F_LINK = 0x2000 + BPF_F_PATH_FD = 0x4000 BPF_STATS_RUN_TIME = 0x0 BPF_STACK_BUILD_ID_EMPTY = 0x0 BPF_STACK_BUILD_ID_VALID = 0x1 @@ -2762,6 +2782,7 @@ const ( BPF_F_ZERO_CSUM_TX = 0x2 BPF_F_DONT_FRAGMENT = 0x4 BPF_F_SEQ_NUMBER = 0x8 + BPF_F_NO_TUNNEL_KEY = 0x10 BPF_F_TUNINFO_FLAGS = 0x10 BPF_F_INDEX_MASK = 0xffffffff BPF_F_CURRENT_CPU = 0xffffffff @@ -2778,6 +2799,8 @@ const ( BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 BPF_F_ADJ_ROOM_NO_CSUM_RESET = 0x20 BPF_F_ADJ_ROOM_ENCAP_L2_ETH = 0x40 + BPF_F_ADJ_ROOM_DECAP_L3_IPV4 = 0x80 + BPF_F_ADJ_ROOM_DECAP_L3_IPV6 = 0x100 BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 BPF_F_SYSCTL_BASE_NAME = 0x1 @@ -2866,6 +2889,8 @@ const ( BPF_DEVCG_DEV_CHAR = 0x2 BPF_FIB_LOOKUP_DIRECT = 0x1 BPF_FIB_LOOKUP_OUTPUT = 0x2 + 
BPF_FIB_LOOKUP_SKIP_NEIGH = 0x4 + BPF_FIB_LOOKUP_TBID = 0x8 BPF_FIB_LKUP_RET_SUCCESS = 0x0 BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 @@ -2901,6 +2926,7 @@ const ( BPF_CORE_ENUMVAL_EXISTS = 0xa BPF_CORE_ENUMVAL_VALUE = 0xb BPF_CORE_TYPE_MATCHES = 0xc + BPF_F_TIMER_ABS = 0x1 ) const ( @@ -2979,6 +3005,12 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } +type LoopConfig struct { + Fd uint32 + Size uint32 + Info LoopInfo64 + _ [8]uint64 +} type TIPCSocketAddr struct { Ref uint32 diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/.travis.yml b/.ci/providerlint/vendor/google.golang.org/appengine/.travis.yml deleted file mode 100644 index 6d03f4d36e8..00000000000 --- a/.ci/providerlint/vendor/google.golang.org/appengine/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -language: go - -go_import_path: google.golang.org/appengine - -install: - - ./travis_install.sh - -script: - - ./travis_test.sh - -matrix: - include: - - go: 1.9.x - env: GOAPP=true - - go: 1.10.x - env: GOAPP=false - - go: 1.11.x - env: GO111MODULE=on diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/CONTRIBUTING.md b/.ci/providerlint/vendor/google.golang.org/appengine/CONTRIBUTING.md index ffc29852085..289693613cc 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/CONTRIBUTING.md +++ b/.ci/providerlint/vendor/google.golang.org/appengine/CONTRIBUTING.md @@ -19,14 +19,12 @@ ## Running system tests -Download and install the [Go App Engine SDK](https://cloud.google.com/appengine/docs/go/download). Make sure the `go_appengine` dir is in your `PATH`. - Set the `APPENGINE_DEV_APPSERVER` environment variable to `/path/to/go_appengine/dev_appserver.py`. -Run tests with `goapp test`: +Run tests with `go test`: ``` -goapp test -v google.golang.org/appengine/... +go test -v google.golang.org/appengine/... 
``` ## Contributor License Agreements diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/README.md b/.ci/providerlint/vendor/google.golang.org/appengine/README.md index 9fdbacd3c60..5ccddd9990d 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/README.md +++ b/.ci/providerlint/vendor/google.golang.org/appengine/README.md @@ -1,6 +1,6 @@ # Go App Engine packages -[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine) +[![CI Status](https://github.com/golang/appengine/actions/workflows/ci.yml/badge.svg)](https://github.com/golang/appengine/actions/workflows/ci.yml) This repository supports the Go runtime on *App Engine standard*. It provides APIs for interacting with App Engine services. @@ -51,7 +51,7 @@ code importing `appengine/datastore` will now need to import `google.golang.org/ Most App Engine services are available with exactly the same API. A few APIs were cleaned up, and there are some differences: -* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`. +* `appengine.Context` has been replaced with the `Context` type from `context`. * Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`. * `appengine.Timeout` has been removed. Use `context.WithTimeout` instead. * `appengine.Datacenter` now takes a `context.Context` argument. @@ -72,7 +72,7 @@ A few APIs were cleaned up, and there are some differences: * `appengine/socket` is not required on App Engine flexible environment / Managed VMs. Use the standard `net` package instead. -## Key Encode/Decode compatibiltiy to help with datastore library migrations +## Key Encode/Decode compatibility to help with datastore library migrations Key compatibility updates have been added to help customers transition from google.golang.org/appengine/datastore to cloud.google.com/go/datastore. 
The `EnableKeyConversion` enables automatic conversion from a key encoded with cloud.google.com/go/datastore to google.golang.org/appengine/datastore key type. diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/appengine.go b/.ci/providerlint/vendor/google.golang.org/appengine/appengine.go index 8c9697674f2..35ba9c89676 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/appengine.go +++ b/.ci/providerlint/vendor/google.golang.org/appengine/appengine.go @@ -9,10 +9,10 @@ package appengine // import "google.golang.org/appengine" import ( + "context" "net/http" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine/internal" ) @@ -35,18 +35,18 @@ import ( // // Main is designed so that the app's main package looks like this: // -// package main +// package main // -// import ( -// "google.golang.org/appengine" +// import ( +// "google.golang.org/appengine" // -// _ "myapp/package0" -// _ "myapp/package1" -// ) +// _ "myapp/package0" +// _ "myapp/package1" +// ) // -// func main() { -// appengine.Main() -// } +// func main() { +// appengine.Main() +// } // // The "myapp/packageX" packages are expected to register HTTP handlers // in their init functions. @@ -54,6 +54,9 @@ func Main() { internal.Main() } +// Middleware wraps an http handler so that it can make GAE API calls +var Middleware func(http.Handler) http.Handler = internal.Middleware + // IsDevAppServer reports whether the App Engine app is running in the // development App Server. 
func IsDevAppServer() bool { diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/appengine_vm.go b/.ci/providerlint/vendor/google.golang.org/appengine/appengine_vm.go index f4b645aad3b..6e1d041cd95 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/appengine_vm.go +++ b/.ci/providerlint/vendor/google.golang.org/appengine/appengine_vm.go @@ -2,19 +2,19 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build !appengine // +build !appengine package appengine import ( - "golang.org/x/net/context" - - "google.golang.org/appengine/internal" + "context" ) // BackgroundContext returns a context not associated with a request. -// This should only be used when not servicing a request. -// This only works in App Engine "flexible environment". +// +// Deprecated: App Engine no longer has a special background context. +// Just use context.Background(). func BackgroundContext() context.Context { - return internal.BackgroundContext() + return context.Background() } diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/datastore/datastore.go b/.ci/providerlint/vendor/google.golang.org/appengine/datastore/datastore.go index 576bc50132a..790fca771f1 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/datastore/datastore.go +++ b/.ci/providerlint/vendor/google.golang.org/appengine/datastore/datastore.go @@ -5,12 +5,12 @@ package datastore import ( + "context" "errors" "fmt" "reflect" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine" "google.golang.org/appengine/internal" diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/datastore/doc.go b/.ci/providerlint/vendor/google.golang.org/appengine/datastore/doc.go index 85616cf2741..1ecf51885fa 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/datastore/doc.go +++ b/.ci/providerlint/vendor/google.golang.org/appengine/datastore/doc.go @@ -5,8 +5,7 @@ /* 
Package datastore provides a client for App Engine's datastore service. - -Basic Operations +# Basic Operations Entities are the unit of storage and are associated with a key. A key consists of an optional parent key, a string application ID, a string kind @@ -74,8 +73,7 @@ GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and Delete functions. They take a []*Key instead of a *Key, and may return an appengine.MultiError when encountering partial failure. - -Properties +# Properties An entity's contents can be represented by a variety of types. These are typically struct pointers, but can also be any type that implements the @@ -137,8 +135,7 @@ Example code: J int `datastore:",noindex" json:"j"` } - -Structured Properties +# Structured Properties If the struct pointed to contains other structs, then the nested or embedded structs are flattened. For example, given these definitions: @@ -179,8 +176,7 @@ equivalent field would instead be: FooDotZ bool `datastore:"Foo.Z"`. If an outer struct is tagged "noindex" then all of its implicit flattened fields are effectively "noindex". - -The PropertyLoadSaver Interface +# The PropertyLoadSaver Interface An entity's contents can also be represented by any type that implements the PropertyLoadSaver interface. This type may be a struct pointer, but it does @@ -230,8 +226,7 @@ Example code: The *PropertyList type implements PropertyLoadSaver, and can therefore hold an arbitrary entity's contents. - -Queries +# Queries Queries retrieve entities based on their properties or key's ancestry. Running a query yields an iterator of results: either keys or (key, entity) pairs. @@ -284,8 +279,7 @@ Example code: io.Copy(w, b) } - -Transactions +# Transactions RunInTransaction runs a function in a transaction. @@ -323,8 +317,7 @@ Example code: fmt.Fprintf(w, "Count=%d", count) } - -Metadata +# Metadata The datastore package provides access to some of App Engine's datastore metadata. 
This metadata includes information about the entity groups, diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/datastore/key.go b/.ci/providerlint/vendor/google.golang.org/appengine/datastore/key.go index fd598dc9657..e312df519db 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/datastore/key.go +++ b/.ci/providerlint/vendor/google.golang.org/appengine/datastore/key.go @@ -6,6 +6,7 @@ package datastore import ( "bytes" + "context" "encoding/base64" "encoding/gob" "errors" @@ -14,7 +15,6 @@ import ( "strings" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/datastore" diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/datastore/keycompat.go b/.ci/providerlint/vendor/google.golang.org/appengine/datastore/keycompat.go index 371a64eeefe..e852f29cf7b 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/datastore/keycompat.go +++ b/.ci/providerlint/vendor/google.golang.org/appengine/datastore/keycompat.go @@ -5,10 +5,9 @@ package datastore import ( + "context" "sync" - "golang.org/x/net/context" - "google.golang.org/appengine/datastore/internal/cloudkey" "google.golang.org/appengine/internal" ) diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/datastore/metadata.go b/.ci/providerlint/vendor/google.golang.org/appengine/datastore/metadata.go index 6acacc3db9a..e1b2d2259bd 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/datastore/metadata.go +++ b/.ci/providerlint/vendor/google.golang.org/appengine/datastore/metadata.go @@ -4,7 +4,7 @@ package datastore -import "golang.org/x/net/context" +import "context" // Datastore kinds for the metadata entities. const ( @@ -50,13 +50,14 @@ func keyNames(keys []*Key) []string { // The properties are returned as a map of property names to a slice of the // representation types. 
The representation types for the supported Go property // types are: -// "INT64": signed integers and time.Time -// "DOUBLE": float32 and float64 -// "BOOLEAN": bool -// "STRING": string, []byte and ByteString -// "POINT": appengine.GeoPoint -// "REFERENCE": *Key -// "USER": (not used in the Go runtime) +// +// "INT64": signed integers and time.Time +// "DOUBLE": float32 and float64 +// "BOOLEAN": bool +// "STRING": string, []byte and ByteString +// "POINT": appengine.GeoPoint +// "REFERENCE": *Key +// "USER": (not used in the Go runtime) func KindProperties(ctx context.Context, kind string) (map[string][]string, error) { // TODO(djd): Support range queries. kindKey := NewKey(ctx, kindKind, kind, 0, nil) diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/datastore/query.go b/.ci/providerlint/vendor/google.golang.org/appengine/datastore/query.go index 4124534b22f..b1b80bf7b6c 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/datastore/query.go +++ b/.ci/providerlint/vendor/google.golang.org/appengine/datastore/query.go @@ -5,6 +5,7 @@ package datastore import ( + "context" "encoding/base64" "errors" "fmt" @@ -13,7 +14,6 @@ import ( "strings" "github.com/golang/protobuf/proto" - "golang.org/x/net/context" "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/datastore" @@ -476,7 +476,7 @@ func callNext(c context.Context, res *pb.QueryResult, offset, count int32) error // The keys returned by GetAll will be in a 1-1 correspondence with the entities // added to dst. // -// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys. +// If q is a “keys-only” query, GetAll ignores dst and only returns the keys. // // The running time and number of API calls made by GetAll scale linearly with // the sum of the query's offset and limit. 
Unless the result count is @@ -754,7 +754,7 @@ func (c Cursor) String() string { return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=") } -// Decode decodes a cursor from its base-64 string representation. +// DecodeCursor decodes a cursor from its base-64 string representation. func DecodeCursor(s string) (Cursor, error) { if s == "" { return Cursor{&zeroCC}, nil diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/datastore/transaction.go b/.ci/providerlint/vendor/google.golang.org/appengine/datastore/transaction.go index 2ae8428f856..06deeb43e77 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/datastore/transaction.go +++ b/.ci/providerlint/vendor/google.golang.org/appengine/datastore/transaction.go @@ -5,10 +5,9 @@ package datastore import ( + "context" "errors" - "golang.org/x/net/context" - "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/datastore" ) diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/identity.go b/.ci/providerlint/vendor/google.golang.org/appengine/identity.go index b8dcf8f3619..1202fc1a531 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/identity.go +++ b/.ci/providerlint/vendor/google.golang.org/appengine/identity.go @@ -5,10 +5,9 @@ package appengine import ( + "context" "time" - "golang.org/x/net/context" - "google.golang.org/appengine/internal" pb "google.golang.org/appengine/internal/app_identity" modpb "google.golang.org/appengine/internal/modules" diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/internal/api.go b/.ci/providerlint/vendor/google.golang.org/appengine/internal/api.go index 721053c20a1..0569f5dd43e 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/internal/api.go +++ b/.ci/providerlint/vendor/google.golang.org/appengine/internal/api.go @@ -2,12 +2,14 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal import ( "bytes" + "context" "errors" "fmt" "io/ioutil" @@ -24,7 +26,6 @@ import ( "time" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" logpb "google.golang.org/appengine/internal/log" @@ -32,8 +33,7 @@ import ( ) const ( - apiPath = "/rpc_http" - defaultTicketSuffix = "/default.20150612t184001.0" + apiPath = "/rpc_http" ) var ( @@ -65,21 +65,22 @@ var ( IdleConnTimeout: 90 * time.Second, }, } - - defaultTicketOnce sync.Once - defaultTicket string - backgroundContextOnce sync.Once - backgroundContext netcontext.Context ) -func apiURL() *url.URL { +func apiURL(ctx context.Context) *url.URL { host, port := "appengine.googleapis.internal", "10001" if h := os.Getenv("API_HOST"); h != "" { host = h } + if hostOverride := ctx.Value(apiHostOverrideKey); hostOverride != nil { + host = hostOverride.(string) + } if p := os.Getenv("API_PORT"); p != "" { port = p } + if portOverride := ctx.Value(apiPortOverrideKey); portOverride != nil { + port = portOverride.(string) + } return &url.URL{ Scheme: "http", Host: host + ":" + port, @@ -87,82 +88,97 @@ func apiURL() *url.URL { } } -func handleHTTP(w http.ResponseWriter, r *http.Request) { - c := &context{ - req: r, - outHeader: w.Header(), - apiURL: apiURL(), - } - r = r.WithContext(withContext(r.Context(), c)) - c.req = r - - stopFlushing := make(chan int) +// Middleware wraps an http handler so that it can make GAE API calls +func Middleware(next http.Handler) http.Handler { + return handleHTTPMiddleware(executeRequestSafelyMiddleware(next)) +} - // Patch up RemoteAddr so it looks reasonable. - if addr := r.Header.Get(userIPHeader); addr != "" { - r.RemoteAddr = addr - } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { - r.RemoteAddr = addr - } else { - // Should not normally reach here, but pick a sensible default anyway. 
- r.RemoteAddr = "127.0.0.1" - } - // The address in the headers will most likely be of these forms: - // 123.123.123.123 - // 2001:db8::1 - // net/http.Request.RemoteAddr is specified to be in "IP:port" form. - if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - // Assume the remote address is only a host; add a default port. - r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") - } +func handleHTTPMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + c := &aeContext{ + req: r, + outHeader: w.Header(), + } + r = r.WithContext(withContext(r.Context(), c)) + c.req = r + + stopFlushing := make(chan int) + + // Patch up RemoteAddr so it looks reasonable. + if addr := r.Header.Get(userIPHeader); addr != "" { + r.RemoteAddr = addr + } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { + r.RemoteAddr = addr + } else { + // Should not normally reach here, but pick a sensible default anyway. + r.RemoteAddr = "127.0.0.1" + } + // The address in the headers will most likely be of these forms: + // 123.123.123.123 + // 2001:db8::1 + // net/http.Request.RemoteAddr is specified to be in "IP:port" form. + if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { + // Assume the remote address is only a host; add a default port. + r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") + } - // Start goroutine responsible for flushing app logs. - // This is done after adding c to ctx.m (and stopped before removing it) - // because flushing logs requires making an API call. - go c.logFlusher(stopFlushing) + if logToLogservice() { + // Start goroutine responsible for flushing app logs. + // This is done after adding c to ctx.m (and stopped before removing it) + // because flushing logs requires making an API call. 
+ go c.logFlusher(stopFlushing) + } - executeRequestSafely(c, r) - c.outHeader = nil // make sure header changes aren't respected any more + next.ServeHTTP(c, r) + c.outHeader = nil // make sure header changes aren't respected any more - stopFlushing <- 1 // any logging beyond this point will be dropped + flushed := make(chan struct{}) + if logToLogservice() { + stopFlushing <- 1 // any logging beyond this point will be dropped - // Flush any pending logs asynchronously. - c.pendingLogs.Lock() - flushes := c.pendingLogs.flushes - if len(c.pendingLogs.lines) > 0 { - flushes++ - } - c.pendingLogs.Unlock() - flushed := make(chan struct{}) - go func() { - defer close(flushed) - // Force a log flush, because with very short requests we - // may not ever flush logs. - c.flushLog(true) - }() - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + // Flush any pending logs asynchronously. + c.pendingLogs.Lock() + flushes := c.pendingLogs.flushes + if len(c.pendingLogs.lines) > 0 { + flushes++ + } + c.pendingLogs.Unlock() + go func() { + defer close(flushed) + // Force a log flush, because with very short requests we + // may not ever flush logs. + c.flushLog(true) + }() + w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + } - // Avoid nil Write call if c.Write is never called. - if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } - // Wait for the last flush to complete before returning, - // otherwise the security ticket will not be valid. - <-flushed + // Avoid nil Write call if c.Write is never called. + if c.outCode != 0 { + w.WriteHeader(c.outCode) + } + if c.outBody != nil { + w.Write(c.outBody) + } + if logToLogservice() { + // Wait for the last flush to complete before returning, + // otherwise the security ticket will not be valid. 
+ <-flushed + } + }) } -func executeRequestSafely(c *context, r *http.Request) { - defer func() { - if x := recover(); x != nil { - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 - } - }() +func executeRequestSafelyMiddleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + if x := recover(); x != nil { + c := w.(*aeContext) + logf(c, 4, "%s", renderPanic(x)) // 4 == critical + c.outCode = 500 + } + }() - http.DefaultServeMux.ServeHTTP(c, r) + next.ServeHTTP(w, r) + }) } func renderPanic(x interface{}) string { @@ -204,9 +220,9 @@ func renderPanic(x interface{}) string { return string(buf) } -// context represents the context of an in-flight HTTP request. +// aeContext represents the aeContext of an in-flight HTTP request. // It implements the appengine.Context and http.ResponseWriter interfaces. -type context struct { +type aeContext struct { req *http.Request outCode int @@ -218,8 +234,6 @@ type context struct { lines []*logpb.UserAppLogLine flushes int } - - apiURL *url.URL } var contextKey = "holds a *context" @@ -227,8 +241,8 @@ var contextKey = "holds a *context" // jointContext joins two contexts in a superficial way. // It takes values and timeouts from a base context, and only values from another context. type jointContext struct { - base netcontext.Context - valuesOnly netcontext.Context + base context.Context + valuesOnly context.Context } func (c jointContext) Deadline() (time.Time, bool) { @@ -252,94 +266,54 @@ func (c jointContext) Value(key interface{}) interface{} { // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. 
-func fromContext(ctx netcontext.Context) *context { - c, _ := ctx.Value(&contextKey).(*context) +func fromContext(ctx context.Context) *aeContext { + c, _ := ctx.Value(&contextKey).(*aeContext) return c } -func withContext(parent netcontext.Context, c *context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c *aeContext) context.Context { + ctx := context.WithValue(parent, &contextKey, c) if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { ctx = withNamespace(ctx, ns) } return ctx } -func toContext(c *context) netcontext.Context { - return withContext(netcontext.Background(), c) +func toContext(c *aeContext) context.Context { + return withContext(context.Background(), c) } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { return c.req.Header } return nil } -func ReqContext(req *http.Request) netcontext.Context { +func ReqContext(req *http.Request) context.Context { return req.Context() } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { return jointContext{ base: parent, valuesOnly: req.Context(), } } -// DefaultTicket returns a ticket used for background context or dev_appserver. 
-func DefaultTicket() string { - defaultTicketOnce.Do(func() { - if IsDevAppServer() { - defaultTicket = "testapp" + defaultTicketSuffix - return - } - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - }) - return defaultTicket -} - -func BackgroundContext() netcontext.Context { - backgroundContextOnce.Do(func() { - // Compute background security ticket. - ticket := DefaultTicket() - - c := &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, - }, - apiURL: apiURL(), - } - backgroundContext = toContext(c) - - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go c.logFlusher(make(chan int)) - }) - - return backgroundContext -} - // RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. It returns a closure to delete -// the registration. +// any API calls are sent to the provided URL. // It should only be used by aetest package. 
-func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { - c := &context{ - req: req, - apiURL: apiURL, - } - ctx := withContext(decorate(req.Context()), c) - req = req.WithContext(ctx) - c.req = req - return req, func() {} +func RegisterTestRequest(req *http.Request, apiURL *url.URL, appID string) *http.Request { + ctx := req.Context() + ctx = withAPIHostOverride(ctx, apiURL.Hostname()) + ctx = withAPIPortOverride(ctx, apiURL.Port()) + ctx = WithAppIDOverride(ctx, appID) + + // use the unregistered request as a placeholder so that withContext can read the headers + c := &aeContext{req: req} + c.req = req.WithContext(withContext(ctx, c)) + return c.req } var errTimeout = &CallError{ @@ -348,7 +322,7 @@ var errTimeout = &CallError{ Timeout: true, } -func (c *context) Header() http.Header { return c.outHeader } +func (c *aeContext) Header() http.Header { return c.outHeader } // Copied from $GOROOT/src/pkg/net/http/transfer.go. 
Some response status // codes do not permit a response body (nor response entity headers such as @@ -365,7 +339,7 @@ func bodyAllowedForStatus(status int) bool { return true } -func (c *context) Write(b []byte) (int, error) { +func (c *aeContext) Write(b []byte) (int, error) { if c.outCode == 0 { c.WriteHeader(http.StatusOK) } @@ -376,7 +350,7 @@ func (c *context) Write(b []byte) (int, error) { return len(b), nil } -func (c *context) WriteHeader(code int) { +func (c *aeContext) WriteHeader(code int) { if c.outCode != 0 { logf(c, 3, "WriteHeader called multiple times on request.") // error level return @@ -384,10 +358,11 @@ func (c *context) WriteHeader(code int) { c.outCode = code } -func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { +func post(ctx context.Context, body []byte, timeout time.Duration) (b []byte, err error) { + apiURL := apiURL(ctx) hreq := &http.Request{ Method: "POST", - URL: c.apiURL, + URL: apiURL, Header: http.Header{ apiEndpointHeader: apiEndpointHeaderValue, apiMethodHeader: apiMethodHeaderValue, @@ -396,13 +371,16 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) }, Body: ioutil.NopCloser(bytes.NewReader(body)), ContentLength: int64(len(body)), - Host: c.apiURL.Host, - } - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) + Host: apiURL.Host, } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) + c := fromContext(ctx) + if c != nil { + if info := c.req.Header.Get(dapperHeader); info != "" { + hreq.Header.Set(dapperHeader, info) + } + if info := c.req.Header.Get(traceHeader); info != "" { + hreq.Header.Set(traceHeader, info) + } } tr := apiHTTPClient.Transport.(*http.Transport) @@ -444,7 +422,7 @@ func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) return hrespBody, nil } -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { 
+func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -463,15 +441,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) } c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errNotAppEngineContext - } // Apply transaction modifications if we're in a transaction. if t := transactionFromContext(ctx); t != nil { if t.finished { - return errors.New("transaction context has expired") + return errors.New("transaction aeContext has expired") } applyTransaction(in, &t.transaction) } @@ -487,20 +461,13 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - ticket := c.req.Header.Get(ticketHeader) - // Use a test ticket under test environment. - if ticket == "" { - if appid := ctx.Value(&appIDOverrideKey); appid != nil { - ticket = appid.(string) + defaultTicketSuffix + ticket := "" + if c != nil { + ticket = c.req.Header.Get(ticketHeader) + if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { + ticket = dri } } - // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. 
- if ticket == "" { - ticket = DefaultTicket() - } - if dri := c.req.Header.Get(devRequestIdHeader); IsDevAppServer() && dri != "" { - ticket = dri - } req := &remotepb.Request{ ServiceName: &service, Method: &method, @@ -512,7 +479,7 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } - hrespBody, err := c.post(hreqBody, timeout) + hrespBody, err := post(ctx, hreqBody, timeout) if err != nil { return err } @@ -549,11 +516,11 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return proto.Unmarshal(res.Response, out) } -func (c *context) Request() *http.Request { +func (c *aeContext) Request() *http.Request { return c.req } -func (c *context) addLogLine(ll *logpb.UserAppLogLine) { +func (c *aeContext) addLogLine(ll *logpb.UserAppLogLine) { // Truncate long log lines. // TODO(dsymonds): Check if this is still necessary. const lim = 8 << 10 @@ -575,18 +542,20 @@ var logLevelName = map[int64]string{ 4: "CRITICAL", } -func logf(c *context, level int64, format string, args ...interface{}) { +func logf(c *aeContext, level int64, format string, args ...interface{}) { if c == nil { - panic("not an App Engine context") + panic("not an App Engine aeContext") } s := fmt.Sprintf(format, args...) s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - // Only duplicate log to stderr if not running on App Engine second generation + if logToLogservice() { + c.addLogLine(&logpb.UserAppLogLine{ + TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), + Level: &level, + Message: &s, + }) + } + // Log to stdout if not deployed if !IsSecondGen() { log.Print(logLevelName[level] + ": " + s) } @@ -594,7 +563,7 @@ func logf(c *context, level int64, format string, args ...interface{}) { // flushLog attempts to flush any pending logs to the appserver. 
// It should not be called concurrently. -func (c *context) flushLog(force bool) (flushed bool) { +func (c *aeContext) flushLog(force bool) (flushed bool) { c.pendingLogs.Lock() // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. n, rem := 0, 30<<20 @@ -655,7 +624,7 @@ const ( forceFlushInterval = 60 * time.Second ) -func (c *context) logFlusher(stop <-chan int) { +func (c *aeContext) logFlusher(stop <-chan int) { lastFlush := time.Now() tick := time.NewTicker(flushInterval) for { @@ -673,6 +642,12 @@ func (c *context) logFlusher(stop <-chan int) { } } -func ContextForTesting(req *http.Request) netcontext.Context { - return toContext(&context{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return toContext(&aeContext{req: req}) +} + +func logToLogservice() bool { + // TODO: replace logservice with json structured logs to $LOG_DIR/app.log.json + // where $LOG_DIR is /var/log in prod and some tmpdir in dev + return os.Getenv("LOG_TO_LOGSERVICE") != "0" } diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/internal/api_classic.go b/.ci/providerlint/vendor/google.golang.org/appengine/internal/api_classic.go index f0f40b2e35c..87c33c798e8 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/internal/api_classic.go +++ b/.ci/providerlint/vendor/google.golang.org/appengine/internal/api_classic.go @@ -2,11 +2,13 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal import ( + "context" "errors" "fmt" "net/http" @@ -17,20 +19,19 @@ import ( basepb "appengine_internal/base" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) var contextKey = "holds an appengine.Context" // fromContext returns the App Engine context or nil if ctx is not // derived from an App Engine context. 
-func fromContext(ctx netcontext.Context) appengine.Context { +func fromContext(ctx context.Context) appengine.Context { c, _ := ctx.Value(&contextKey).(appengine.Context) return c } // This is only for classic App Engine adapters. -func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) { +func ClassicContextFromContext(ctx context.Context) (appengine.Context, error) { c := fromContext(ctx) if c == nil { return nil, errNotAppEngineContext @@ -38,8 +39,8 @@ func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error return c, nil } -func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) +func withContext(parent context.Context, c appengine.Context) context.Context { + ctx := context.WithValue(parent, &contextKey, c) s := &basepb.StringProto{} c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) @@ -50,7 +51,7 @@ func withContext(parent netcontext.Context, c appengine.Context) netcontext.Cont return ctx } -func IncomingHeaders(ctx netcontext.Context) http.Header { +func IncomingHeaders(ctx context.Context) http.Header { if c := fromContext(ctx); c != nil { if req, ok := c.Request().(*http.Request); ok { return req.Header @@ -59,11 +60,11 @@ func IncomingHeaders(ctx netcontext.Context) http.Header { return nil } -func ReqContext(req *http.Request) netcontext.Context { - return WithContext(netcontext.Background(), req) +func ReqContext(req *http.Request) context.Context { + return WithContext(context.Background(), req) } -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { +func WithContext(parent context.Context, req *http.Request) context.Context { c := appengine.NewContext(req) return withContext(parent, c) } @@ -83,11 +84,11 @@ func (t *testingContext) Call(service, method string, _, _ appengine_internal.Pr } func (t *testingContext) Request() interface{} { return t.req } -func 
ContextForTesting(req *http.Request) netcontext.Context { - return withContext(netcontext.Background(), &testingContext{req: req}) +func ContextForTesting(req *http.Request) context.Context { + return withContext(context.Background(), &testingContext{req: req}) } -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { +func Call(ctx context.Context, service, method string, in, out proto.Message) error { if ns := NamespaceFromContext(ctx); ns != "" { if fn, ok := NamespaceMods[service]; ok { fn(in, ns) @@ -144,8 +145,8 @@ func Call(ctx netcontext.Context, service, method string, in, out proto.Message) return err } -func handleHTTP(w http.ResponseWriter, r *http.Request) { - panic("handleHTTP called; this should be impossible") +func Middleware(next http.Handler) http.Handler { + panic("Middleware called; this should be impossible") } func logf(c appengine.Context, level int64, format string, args ...interface{}) { diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/internal/api_common.go b/.ci/providerlint/vendor/google.golang.org/appengine/internal/api_common.go index e0c0b214b72..5b95c13d926 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/internal/api_common.go +++ b/.ci/providerlint/vendor/google.golang.org/appengine/internal/api_common.go @@ -5,20 +5,26 @@ package internal import ( + "context" "errors" "os" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" ) +type ctxKey string + +func (c ctxKey) String() string { + return "appengine context key: " + string(c) +} + var errNotAppEngineContext = errors.New("not an App Engine context") -type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error +type CallOverrideFunc func(ctx context.Context, service, method string, in, out proto.Message) error var callOverrideKey = "holds []CallOverrideFunc" -func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { 
+func WithCallOverride(ctx context.Context, f CallOverrideFunc) context.Context { // We avoid appending to any existing call override // so we don't risk overwriting a popped stack below. var cofs []CallOverrideFunc @@ -26,10 +32,10 @@ func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Con cofs = append(cofs, uf...) } cofs = append(cofs, f) - return netcontext.WithValue(ctx, &callOverrideKey, cofs) + return context.WithValue(ctx, &callOverrideKey, cofs) } -func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { +func callOverrideFromContext(ctx context.Context) (CallOverrideFunc, context.Context, bool) { cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) if len(cofs) == 0 { return nil, nil, false @@ -37,7 +43,7 @@ func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netconte // We found a list of overrides; grab the last, and reconstitute a // context that will hide it. f := cofs[len(cofs)-1] - ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) + ctx = context.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) return f, ctx, true } @@ -45,23 +51,35 @@ type logOverrideFunc func(level int64, format string, args ...interface{}) var logOverrideKey = "holds a logOverrideFunc" -func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { - return netcontext.WithValue(ctx, &logOverrideKey, f) +func WithLogOverride(ctx context.Context, f logOverrideFunc) context.Context { + return context.WithValue(ctx, &logOverrideKey, f) } var appIDOverrideKey = "holds a string, being the full app ID" -func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { - return netcontext.WithValue(ctx, &appIDOverrideKey, appID) +func WithAppIDOverride(ctx context.Context, appID string) context.Context { + return context.WithValue(ctx, &appIDOverrideKey, appID) +} + +var apiHostOverrideKey = ctxKey("holds a string, being the 
alternate API_HOST") + +func withAPIHostOverride(ctx context.Context, apiHost string) context.Context { + return context.WithValue(ctx, apiHostOverrideKey, apiHost) +} + +var apiPortOverrideKey = ctxKey("holds a string, being the alternate API_PORT") + +func withAPIPortOverride(ctx context.Context, apiPort string) context.Context { + return context.WithValue(ctx, apiPortOverrideKey, apiPort) } var namespaceKey = "holds the namespace string" -func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { - return netcontext.WithValue(ctx, &namespaceKey, ns) +func withNamespace(ctx context.Context, ns string) context.Context { + return context.WithValue(ctx, &namespaceKey, ns) } -func NamespaceFromContext(ctx netcontext.Context) string { +func NamespaceFromContext(ctx context.Context) string { // If there's no namespace, return the empty string. ns, _ := ctx.Value(&namespaceKey).(string) return ns @@ -70,14 +88,14 @@ func NamespaceFromContext(ctx netcontext.Context) string { // FullyQualifiedAppID returns the fully-qualified application ID. // This may contain a partition prefix (e.g. "s~" for High Replication apps), // or a domain prefix (e.g. "example.com:"). -func FullyQualifiedAppID(ctx netcontext.Context) string { +func FullyQualifiedAppID(ctx context.Context) string { if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { return id } return fullyQualifiedAppID(ctx) } -func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { +func Logf(ctx context.Context, level int64, format string, args ...interface{}) { if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { f(level, format, args...) return @@ -90,7 +108,7 @@ func Logf(ctx netcontext.Context, level int64, format string, args ...interface{ } // NamespacedContext wraps a Context to support namespaces. 
-func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { +func NamespacedContext(ctx context.Context, namespace string) context.Context { return withNamespace(ctx, namespace) } diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/internal/identity.go b/.ci/providerlint/vendor/google.golang.org/appengine/internal/identity.go index 9b4134e4257..0f95aa91d5b 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/internal/identity.go +++ b/.ci/providerlint/vendor/google.golang.org/appengine/internal/identity.go @@ -5,9 +5,8 @@ package internal import ( + "context" "os" - - netcontext "golang.org/x/net/context" ) var ( @@ -23,7 +22,7 @@ var ( // AppID is the implementation of the wrapper function of the same name in // ../identity.go. See that file for commentary. -func AppID(c netcontext.Context) string { +func AppID(c context.Context) string { return appID(FullyQualifiedAppID(c)) } @@ -35,7 +34,7 @@ func IsStandard() bool { return appengineStandard || IsSecondGen() } -// IsStandard is the implementation of the wrapper function of the same name in +// IsSecondGen is the implementation of the wrapper function of the same name in // ../appengine.go. See that file for commentary. func IsSecondGen() bool { // Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime. diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/internal/identity_classic.go b/.ci/providerlint/vendor/google.golang.org/appengine/internal/identity_classic.go index 4e979f45e34..5ad3548bf74 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/internal/identity_classic.go +++ b/.ci/providerlint/vendor/google.golang.org/appengine/internal/identity_classic.go @@ -2,21 +2,22 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build appengine // +build appengine package internal import ( - "appengine" + "context" - netcontext "golang.org/x/net/context" + "appengine" ) func init() { appengineStandard = true } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -24,12 +25,12 @@ func DefaultVersionHostname(ctx netcontext.Context) string { return appengine.DefaultVersionHostname(c) } -func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } -func ServerSoftware() string { return appengine.ServerSoftware() } -func InstanceID() string { return appengine.InstanceID() } -func IsDevAppServer() bool { return appengine.IsDevAppServer() } +func Datacenter(_ context.Context) string { return appengine.Datacenter() } +func ServerSoftware() string { return appengine.ServerSoftware() } +func InstanceID() string { return appengine.InstanceID() } +func IsDevAppServer() bool { return appengine.IsDevAppServer() } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -37,14 +38,14 @@ func RequestID(ctx netcontext.Context) string { return appengine.RequestID(c) } -func ModuleName(ctx netcontext.Context) string { +func ModuleName(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) } return appengine.ModuleName(c) } -func VersionID(ctx netcontext.Context) string { +func VersionID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) @@ -52,7 +53,7 @@ func VersionID(ctx netcontext.Context) string { return appengine.VersionID(c) } -func fullyQualifiedAppID(ctx netcontext.Context) string { +func fullyQualifiedAppID(ctx context.Context) string { c := fromContext(ctx) if c == nil { panic(errNotAppEngineContext) diff --git 
a/.ci/providerlint/vendor/google.golang.org/appengine/internal/identity_flex.go b/.ci/providerlint/vendor/google.golang.org/appengine/internal/identity_flex.go index d5e2e7b5e3f..4201b6b585a 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/internal/identity_flex.go +++ b/.ci/providerlint/vendor/google.golang.org/appengine/internal/identity_flex.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appenginevm // +build appenginevm package internal diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/internal/identity_vm.go b/.ci/providerlint/vendor/google.golang.org/appengine/internal/identity_vm.go index 5d806726355..18ddda3a423 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/internal/identity_vm.go +++ b/.ci/providerlint/vendor/google.golang.org/appengine/internal/identity_vm.go @@ -2,17 +2,17 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal import ( + "context" "log" "net/http" "os" "strings" - - netcontext "golang.org/x/net/context" ) // These functions are implementations of the wrapper functions @@ -24,7 +24,7 @@ const ( hDatacenter = "X-AppEngine-Datacenter" ) -func ctxHeaders(ctx netcontext.Context) http.Header { +func ctxHeaders(ctx context.Context) http.Header { c := fromContext(ctx) if c == nil { return nil @@ -32,15 +32,15 @@ func ctxHeaders(ctx netcontext.Context) http.Header { return c.Request().Header } -func DefaultVersionHostname(ctx netcontext.Context) string { +func DefaultVersionHostname(ctx context.Context) string { return ctxHeaders(ctx).Get(hDefaultVersionHostname) } -func RequestID(ctx netcontext.Context) string { +func RequestID(ctx context.Context) string { return ctxHeaders(ctx).Get(hRequestLogId) } -func Datacenter(ctx netcontext.Context) string { +func Datacenter(ctx context.Context) string { if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" { return dc } @@ -71,7 +71,7 @@ func ServerSoftware() string { // TODO(dsymonds): Remove the metadata fetches. -func ModuleName(_ netcontext.Context) string { +func ModuleName(_ context.Context) string { if s := os.Getenv("GAE_MODULE_NAME"); s != "" { return s } @@ -81,7 +81,7 @@ func ModuleName(_ netcontext.Context) string { return string(mustGetMetadata("instance/attributes/gae_backend_name")) } -func VersionID(_ netcontext.Context) string { +func VersionID(_ context.Context) string { if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { return s1 + "." 
+ s2 } @@ -112,7 +112,7 @@ func partitionlessAppID() string { return string(mustGetMetadata("instance/attributes/gae_project")) } -func fullyQualifiedAppID(_ netcontext.Context) string { +func fullyQualifiedAppID(_ context.Context) string { if s := os.Getenv("GAE_APPLICATION"); s != "" { return s } @@ -130,5 +130,5 @@ func fullyQualifiedAppID(_ netcontext.Context) string { } func IsDevAppServer() bool { - return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" + return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" || os.Getenv("GAE_ENV") == "localdev" } diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/internal/main.go b/.ci/providerlint/vendor/google.golang.org/appengine/internal/main.go index 1e765312fd1..afd0ae84fdf 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/internal/main.go +++ b/.ci/providerlint/vendor/google.golang.org/appengine/internal/main.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. +//go:build appengine // +build appengine package internal diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/internal/main_vm.go b/.ci/providerlint/vendor/google.golang.org/appengine/internal/main_vm.go index ddb79a33387..86a8caf06f3 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/internal/main_vm.go +++ b/.ci/providerlint/vendor/google.golang.org/appengine/internal/main_vm.go @@ -2,6 +2,7 @@ // Use of this source code is governed by the Apache 2.0 // license that can be found in the LICENSE file. 
+//go:build !appengine // +build !appengine package internal @@ -29,7 +30,7 @@ func Main() { if IsDevAppServer() { host = "127.0.0.1" } - if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil { + if err := http.ListenAndServe(host+":"+port, Middleware(http.DefaultServeMux)); err != nil { log.Fatalf("http.ListenAndServe: %v", err) } } diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/internal/transaction.go b/.ci/providerlint/vendor/google.golang.org/appengine/internal/transaction.go index 9006ae65380..2ae8ab9fa42 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/internal/transaction.go +++ b/.ci/providerlint/vendor/google.golang.org/appengine/internal/transaction.go @@ -7,11 +7,11 @@ package internal // This file implements hooks for applying datastore transactions. import ( + "context" "errors" "reflect" "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" basepb "google.golang.org/appengine/internal/base" pb "google.golang.org/appengine/internal/datastore" @@ -38,13 +38,13 @@ func applyTransaction(pb proto.Message, t *pb.Transaction) { var transactionKey = "used for *Transaction" -func transactionFromContext(ctx netcontext.Context) *transaction { +func transactionFromContext(ctx context.Context) *transaction { t, _ := ctx.Value(&transactionKey).(*transaction) return t } -func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { - return netcontext.WithValue(ctx, &transactionKey, t) +func withTransaction(ctx context.Context, t *transaction) context.Context { + return context.WithValue(ctx, &transactionKey, t) } type transaction struct { @@ -54,7 +54,7 @@ type transaction struct { var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") -func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { +func RunTransactionOnce(c 
context.Context, f func(context.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { if transactionFromContext(c) != nil { return nil, errors.New("nested transactions are not supported") } diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/namespace.go b/.ci/providerlint/vendor/google.golang.org/appengine/namespace.go index 21860ca0822..6f169be487d 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/namespace.go +++ b/.ci/providerlint/vendor/google.golang.org/appengine/namespace.go @@ -5,11 +5,10 @@ package appengine import ( + "context" "fmt" "regexp" - "golang.org/x/net/context" - "google.golang.org/appengine/internal" ) diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/timeout.go b/.ci/providerlint/vendor/google.golang.org/appengine/timeout.go index 05642a992a3..fcf3ad0a58f 100644 --- a/.ci/providerlint/vendor/google.golang.org/appengine/timeout.go +++ b/.ci/providerlint/vendor/google.golang.org/appengine/timeout.go @@ -4,7 +4,7 @@ package appengine -import "golang.org/x/net/context" +import "context" // IsTimeoutError reports whether err is a timeout error. func IsTimeoutError(err error) bool { diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/travis_install.sh b/.ci/providerlint/vendor/google.golang.org/appengine/travis_install.sh deleted file mode 100644 index 785b62f46e8..00000000000 --- a/.ci/providerlint/vendor/google.golang.org/appengine/travis_install.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -e - -if [[ $GO111MODULE == "on" ]]; then - go get . -else - go get -u -v $(go list -f '{{join .Imports "\n"}}{{"\n"}}{{join .TestImports "\n"}}' ./... 
| sort | uniq | grep -v appengine) -fi - -if [[ $GOAPP == "true" ]]; then - mkdir /tmp/sdk - curl -o /tmp/sdk.zip "https://storage.googleapis.com/appengine-sdks/featured/go_appengine_sdk_linux_amd64-1.9.68.zip" - unzip -q /tmp/sdk.zip -d /tmp/sdk - # NOTE: Set the following env vars in the test script: - # export PATH="$PATH:/tmp/sdk/go_appengine" - # export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py -fi - diff --git a/.ci/providerlint/vendor/google.golang.org/appengine/travis_test.sh b/.ci/providerlint/vendor/google.golang.org/appengine/travis_test.sh deleted file mode 100644 index d4390f045b6..00000000000 --- a/.ci/providerlint/vendor/google.golang.org/appengine/travis_test.sh +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash -set -e - -go version -go test -v google.golang.org/appengine/... -go test -v -race google.golang.org/appengine/... -if [[ $GOAPP == "true" ]]; then - export PATH="$PATH:/tmp/sdk/go_appengine" - export APPENGINE_DEV_APPSERVER=/tmp/sdk/go_appengine/dev_appserver.py - goapp version - goapp test -v google.golang.org/appengine/... -fi diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/README.md b/.ci/providerlint/vendor/google.golang.org/grpc/README.md index 0e6ae69a584..ab0fbb79b86 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/README.md +++ b/.ci/providerlint/vendor/google.golang.org/grpc/README.md @@ -1,8 +1,8 @@ # gRPC-Go -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) +[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go) The [Go][] implementation of [gRPC][]: A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. 
For more information see the @@ -14,21 +14,14 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the ## Installation -With [Go module][] support (Go 1.11+), simply add the following import +Simply add the following import to your code, and then `go [build|run|test]` +will automatically fetch the necessary dependencies: + ```go import "google.golang.org/grpc" ``` -to your code, and then `go [build|run|test]` will automatically fetch the -necessary dependencies. - -Otherwise, to install the `grpc-go` package, run the following command: - -```console -$ go get -u google.golang.org/grpc -``` - > **Note:** If you are trying to access `grpc-go` from **China**, see the > [FAQ](#FAQ) below. @@ -56,15 +49,6 @@ To build Go code, there are several options: - Set up a VPN and access google.golang.org through that. -- Without Go module support: `git clone` the repo manually: - - ```sh - git clone https://github.com/grpc/grpc-go.git $GOPATH/src/google.golang.org/grpc - ``` - - You will need to do the same for all of grpc's dependencies in `golang.org`, - e.g. `golang.org/x/net`. - - With Go module support: it is possible to use the `replace` feature of `go mod` to create aliases for golang.org packages. In your project's directory: @@ -76,33 +60,13 @@ To build Go code, there are several options: ``` Again, this will need to be done for all transitive dependencies hosted on - golang.org as well. For details, refer to [golang/go issue #28652](https://github.com/golang/go/issues/28652). + golang.org as well. For details, refer to [golang/go issue + #28652](https://github.com/golang/go/issues/28652). ### Compiling error, undefined: grpc.SupportPackageIsVersion -#### If you are using Go modules: - -Ensure your gRPC-Go version is `require`d at the appropriate version in -the same module containing the generated `.pb.go` files. 
For example, -`SupportPackageIsVersion6` needs `v1.27.0`, so in your `go.mod` file: - -```go -module - -require ( - google.golang.org/grpc v1.27.0 -) -``` - -#### If you are *not* using Go modules: - -Update the `proto` package, gRPC package, and rebuild the `.proto` files: - -```sh -go get -u github.com/golang/protobuf/{proto,protoc-gen-go} -go get -u google.golang.org/grpc -protoc --go_out=plugins=grpc:. *.proto -``` +Please update to the latest version of gRPC-Go using +`go get google.golang.org/grpc`. ### How to turn on logging @@ -121,9 +85,11 @@ possible reasons, including: 1. mis-configured transport credentials, connection failed on handshaking 1. bytes disrupted, possibly by a proxy in between 1. server shutdown - 1. Keepalive parameters caused connection shutdown, for example if you have configured - your server to terminate connections regularly to [trigger DNS lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). - If this is the case, you may want to increase your [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), + 1. Keepalive parameters caused connection shutdown, for example if you have + configured your server to terminate connections regularly to [trigger DNS + lookups](https://github.com/grpc/grpc-go/issues/3170#issuecomment-552517779). + If this is the case, you may want to increase your + [MaxConnectionAgeGrace](https://pkg.go.dev/google.golang.org/grpc/keepalive?tab=doc#ServerParameters), to allow longer RPC calls to finish. 
It can be tricky to debug this because the error happens on the client side but diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/attributes/attributes.go b/.ci/providerlint/vendor/google.golang.org/grpc/attributes/attributes.go index 49712aca33a..52d530d7ad0 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/attributes/attributes.go @@ -34,26 +34,26 @@ import ( // key/value pairs. Keys must be hashable, and users should define their own // types for keys. Values should not be modified after they are added to an // Attributes or if they were received from one. If values implement 'Equal(o -// interface{}) bool', it will be called by (*Attributes).Equal to determine -// whether two values with the same key should be considered equal. +// any) bool', it will be called by (*Attributes).Equal to determine whether +// two values with the same key should be considered equal. type Attributes struct { - m map[interface{}]interface{} + m map[any]any } // New returns a new Attributes containing the key/value pair. -func New(key, value interface{}) *Attributes { - return &Attributes{m: map[interface{}]interface{}{key: value}} +func New(key, value any) *Attributes { + return &Attributes{m: map[any]any{key: value}} } // WithValue returns a new Attributes containing the previous keys and values // and the new key/value pair. If the same key appears multiple times, the // last value overwrites all previous values for that key. To remove an // existing key, use a nil value. value should not be modified later. 
-func (a *Attributes) WithValue(key, value interface{}) *Attributes { +func (a *Attributes) WithValue(key, value any) *Attributes { if a == nil { return New(key, value) } - n := &Attributes{m: make(map[interface{}]interface{}, len(a.m)+1)} + n := &Attributes{m: make(map[any]any, len(a.m)+1)} for k, v := range a.m { n.m[k] = v } @@ -63,20 +63,19 @@ func (a *Attributes) WithValue(key, value interface{}) *Attributes { // Value returns the value associated with these attributes for key, or nil if // no value is associated with key. The returned value should not be modified. -func (a *Attributes) Value(key interface{}) interface{} { +func (a *Attributes) Value(key any) any { if a == nil { return nil } return a.m[key] } -// Equal returns whether a and o are equivalent. If 'Equal(o interface{}) -// bool' is implemented for a value in the attributes, it is called to -// determine if the value matches the one stored in the other attributes. If -// Equal is not implemented, standard equality is used to determine if the two -// values are equal. Note that some types (e.g. maps) aren't comparable by -// default, so they must be wrapped in a struct, or in an alias type, with Equal -// defined. +// Equal returns whether a and o are equivalent. If 'Equal(o any) bool' is +// implemented for a value in the attributes, it is called to determine if the +// value matches the one stored in the other attributes. If Equal is not +// implemented, standard equality is used to determine if the two values are +// equal. Note that some types (e.g. maps) aren't comparable by default, so +// they must be wrapped in a struct, or in an alias type, with Equal defined. 
func (a *Attributes) Equal(o *Attributes) bool { if a == nil && o == nil { return true @@ -93,7 +92,7 @@ func (a *Attributes) Equal(o *Attributes) bool { // o missing element of a return false } - if eq, ok := v.(interface{ Equal(o interface{}) bool }); ok { + if eq, ok := v.(interface{ Equal(o any) bool }); ok { if !eq.Equal(ov) { return false } @@ -122,9 +121,9 @@ func (a *Attributes) String() string { return sb.String() } -func str(x interface{}) string { +func str(x any) (s string) { if v, ok := x.(fmt.Stringer); ok { - return v.String() + return fmt.Sprint(v) } else if v, ok := x.(string); ok { return v } diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/balancer/balancer.go b/.ci/providerlint/vendor/google.golang.org/grpc/balancer/balancer.go index 8f00523c0e2..d79560a2e26 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -39,6 +40,8 @@ import ( var ( // m is a map from name to balancer builder. m = make(map[string]Builder) + + logger = grpclog.Component("balancer") ) // Register registers the balancer builder to the balancer map. b.Name @@ -51,6 +54,12 @@ var ( // an init() function), and is not thread-safe. If multiple Balancers are // registered with the same name, the one registered last will take effect. func Register(b Builder) { + if strings.ToLower(b.Name()) != b.Name() { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer registered with name %q. 
grpc-go will be switching to case sensitive balancer registries soon", b.Name()) + } m[strings.ToLower(b.Name())] = b } @@ -70,6 +79,12 @@ func init() { // Note that the compare is done in a case-insensitive fashion. // If no builder is register with the name, nil will be returned. func Get(name string) Builder { + if strings.ToLower(name) != name { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name) + } if b, ok := m[strings.ToLower(name)]; ok { return b } @@ -105,8 +120,8 @@ type SubConn interface { // // This will trigger a state transition for the SubConn. // - // Deprecated: This method is now part of the ClientConn interface and will - // eventually be removed from here. + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. UpdateAddresses([]resolver.Address) // Connect starts the connecting for this SubConn. Connect() @@ -115,6 +130,13 @@ type SubConn interface { // creates a new one and returns it. Returns a close function which must // be called when the Producer is no longer needed. GetOrBuildProducer(ProducerBuilder) (p Producer, close func()) + // Shutdown shuts down the SubConn gracefully. Any started RPCs will be + // allowed to complete. No future calls should be made on the SubConn. + // One final state update will be delivered to the StateListener (or + // UpdateSubConnState; deprecated) with ConnectivityState of Shutdown to + // indicate the shutdown operation. This may be delivered before + // in-progress RPCs are complete and the actual connection is closed. + Shutdown() } // NewSubConnOptions contains options to create new SubConn. 
@@ -129,6 +151,11 @@ type NewSubConnOptions struct { // HealthCheckEnabled indicates whether health check service should be // enabled on this SubConn HealthCheckEnabled bool + // StateListener is called when the state of the subconn changes. If nil, + // Balancer.UpdateSubConnState will be called instead. Will never be + // invoked until after Connect() is called on the SubConn created with + // these options. + StateListener func(SubConnState) } // State contains the balancer's state relevant to the gRPC ClientConn. @@ -150,16 +177,24 @@ type ClientConn interface { // NewSubConn is called by balancer to create a new SubConn. // It doesn't block and wait for the connections to be established. // Behaviors of the SubConn can be controlled by options. + // + // Deprecated: please be aware that in a future version, SubConns will only + // support one address per SubConn. NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) // RemoveSubConn removes the SubConn from ClientConn. // The SubConn will be shutdown. + // + // Deprecated: use SubConn.Shutdown instead. RemoveSubConn(SubConn) // UpdateAddresses updates the addresses used in the passed in SubConn. // gRPC checks if the currently connected address is still in the new list. // If so, the connection will be kept. Else, the connection will be // gracefully closed, and a new connection will be created. // - // This will trigger a state transition for the SubConn. + // This may trigger a state transition for the SubConn. + // + // Deprecated: this method will be removed. Create new SubConns for new + // addresses instead. UpdateAddresses(SubConn, []resolver.Address) // UpdateState notifies gRPC that the balancer's internal state has @@ -250,7 +285,7 @@ type DoneInfo struct { // trailing metadata. // // The only supported type now is *orca_v3.LoadReport. 
- ServerLoad interface{} + ServerLoad any } var ( @@ -343,9 +378,13 @@ type Balancer interface { ResolverError(error) // UpdateSubConnState is called by gRPC when the state of a SubConn // changes. + // + // Deprecated: Use NewSubConnOptions.StateListener when creating the + // SubConn instead. UpdateSubConnState(SubConn, SubConnState) - // Close closes the balancer. The balancer is not required to call - // ClientConn.RemoveSubConn for its existing SubConns. + // Close closes the balancer. The balancer is not currently required to + // call SubConn.Shutdown for its existing SubConns; however, this will be + // required in a future release, so it is recommended. Close() } @@ -390,15 +429,14 @@ var ErrBadResolverState = errors.New("bad resolver state") type ProducerBuilder interface { // Build creates a Producer. The first parameter is always a // grpc.ClientConnInterface (a type to allow creating RPCs/streams on the - // associated SubConn), but is declared as interface{} to avoid a - // dependency cycle. Should also return a close function that will be - // called when all references to the Producer have been given up. - Build(grpcClientConnInterface interface{}) (p Producer, close func()) + // associated SubConn), but is declared as `any` to avoid a dependency + // cycle. Should also return a close function that will be called when all + // references to the Producer have been given up. + Build(grpcClientConnInterface any) (p Producer, close func()) } // A Producer is a type shared among potentially many consumers. It is // associated with a SubConn, and an implementation will typically contain // other methods to provide additional functionality, e.g. configuration or // subscription registration. 
-type Producer interface { -} +type Producer any diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/balancer/base/balancer.go b/.ci/providerlint/vendor/google.golang.org/grpc/balancer/base/balancer.go index 3929c26d31e..a7f1eeec8e6 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/balancer/base/balancer.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -105,7 +105,12 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { addrsSet.Set(a, nil) if _, ok := b.subConns.Get(a); !ok { // a is a new address (not existing in b.subConns). - sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{HealthCheckEnabled: b.config.HealthCheck}) + var sc balancer.SubConn + opts := balancer.NewSubConnOptions{ + HealthCheckEnabled: b.config.HealthCheck, + StateListener: func(scs balancer.SubConnState) { b.updateSubConnState(sc, scs) }, + } + sc, err := b.cc.NewSubConn([]resolver.Address{a}, opts) if err != nil { logger.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) continue @@ -121,10 +126,10 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error { sc := sci.(balancer.SubConn) // a was removed by resolver. if _, ok := addrsSet.Get(a); !ok { - b.cc.RemoveSubConn(sc) + sc.Shutdown() b.subConns.Delete(a) // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. - // The entry will be deleted in UpdateSubConnState. + // The entry will be deleted in updateSubConnState. } } // If resolver state contains no addresses, return an error so ClientConn @@ -177,7 +182,12 @@ func (b *baseBalancer) regeneratePicker() { b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs}) } +// UpdateSubConnState is a nop because a StateListener is always set in NewSubConn. 
func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + logger.Errorf("base.baseBalancer: UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) +} + +func (b *baseBalancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { s := state.ConnectivityState if logger.V(2) { logger.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) @@ -204,8 +214,8 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su case connectivity.Idle: sc.Connect() case connectivity.Shutdown: - // When an address was removed by resolver, b called RemoveSubConn but - // kept the sc's state in scStates. Remove state for this sc here. + // When an address was removed by resolver, b called Shutdown but kept + // the sc's state in scStates. Remove state for this sc here. delete(b.scStates, sc) case connectivity.TransientFailure: // Save error to be reported via picker. @@ -226,7 +236,7 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su } // Close is a nop because base balancer doesn't have internal state to clean up, -// and it doesn't need to call RemoveSubConn for the SubConns. +// and it doesn't need to call Shutdown for the SubConns. 
func (b *baseBalancer) Close() { } diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/.ci/providerlint/vendor/google.golang.org/grpc/balancer_wrapper.go similarity index 52% rename from .ci/providerlint/vendor/google.golang.org/grpc/balancer_conn_wrappers.go rename to .ci/providerlint/vendor/google.golang.org/grpc/balancer_wrapper.go index 04b9ad41169..b5e30cff021 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/balancer_conn_wrappers.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/balancer_wrapper.go @@ -32,21 +32,13 @@ import ( "google.golang.org/grpc/resolver" ) -type ccbMode int - -const ( - ccbModeActive = iota - ccbModeIdle - ccbModeClosed - ccbModeExitingIdle -) - // ccBalancerWrapper sits between the ClientConn and the Balancer. // // ccBalancerWrapper implements methods corresponding to the ones on the // balancer.Balancer interface. The ClientConn is free to call these methods // concurrently and the ccBalancerWrapper ensures that calls from the ClientConn -// to the Balancer happen synchronously and in order. +// to the Balancer happen in order by performing them in the serializer, without +// any mutexes held. // // ccBalancerWrapper also implements the balancer.ClientConn interface and is // passed to the Balancer implementations. It invokes unexported methods on the @@ -57,99 +49,75 @@ const ( type ccBalancerWrapper struct { // The following fields are initialized when the wrapper is created and are // read-only afterwards, and therefore can be accessed without a mutex. - cc *ClientConn - opts balancer.BuildOptions + cc *ClientConn + opts balancer.BuildOptions + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc - // Outgoing (gRPC --> balancer) calls are guaranteed to execute in a - // mutually exclusive manner as they are scheduled in the serializer. Fields - // accessed *only* in these serializer callbacks, can therefore be accessed - // without a mutex. 
- balancer *gracefulswitch.Balancer + // The following fields are only accessed within the serializer or during + // initialization. curBalancerName string + balancer *gracefulswitch.Balancer - // mu guards access to the below fields. Access to the serializer and its - // cancel function needs to be mutex protected because they are overwritten - // when the wrapper exits idle mode. - mu sync.Mutex - serializer *grpcsync.CallbackSerializer // To serialize all outoing calls. - serializerCancel context.CancelFunc // To close the seralizer at close/enterIdle time. - mode ccbMode // Tracks the current mode of the wrapper. + // The following field is protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + closed bool } -// newCCBalancerWrapper creates a new balancer wrapper. The underlying balancer -// is not created until the switchTo() method is invoked. -func newCCBalancerWrapper(cc *ClientConn, bopts balancer.BuildOptions) *ccBalancerWrapper { - ctx, cancel := context.WithCancel(context.Background()) +// newCCBalancerWrapper creates a new balancer wrapper in idle state. The +// underlying balancer is not created until the switchTo() method is invoked. +func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper { + ctx, cancel := context.WithCancel(cc.ctx) ccb := &ccBalancerWrapper{ - cc: cc, - opts: bopts, + cc: cc, + opts: balancer.BuildOptions{ + DialCreds: cc.dopts.copts.TransportCredentials, + CredsBundle: cc.dopts.copts.CredsBundle, + Dialer: cc.dopts.copts.Dialer, + Authority: cc.authority, + CustomUserAgent: cc.dopts.copts.UserAgent, + ChannelzParentID: cc.channelzID, + Target: cc.parsedTarget, + }, serializer: grpcsync.NewCallbackSerializer(ctx), serializerCancel: cancel, } - ccb.balancer = gracefulswitch.NewBalancer(ccb, bopts) + ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) return ccb } // updateClientConnState is invoked by grpc to push a ClientConnState update to -// the underlying balancer. 
+// the underlying balancer. This is always executed from the serializer, so +// it is safe to call into the balancer here. func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error { - ccb.mu.Lock() - errCh := make(chan error, 1) - // Here and everywhere else where Schedule() is called, it is done with the - // lock held. But the lock guards only the scheduling part. The actual - // callback is called asynchronously without the lock being held. - ok := ccb.serializer.Schedule(func(_ context.Context) { - // If the addresses specified in the update contain addresses of type - // "grpclb" and the selected LB policy is not "grpclb", these addresses - // will be filtered out and ccs will be modified with the updated - // address list. - if ccb.curBalancerName != grpclbName { - var addrs []resolver.Address - for _, addr := range ccs.ResolverState.Addresses { - if addr.Type == resolver.GRPCLB { - continue - } - addrs = append(addrs, addr) - } - ccs.ResolverState.Addresses = addrs + errCh := make(chan error) + ok := ccb.serializer.Schedule(func(ctx context.Context) { + defer close(errCh) + if ctx.Err() != nil || ccb.balancer == nil { + return + } + err := ccb.balancer.UpdateClientConnState(*ccs) + if logger.V(2) && err != nil { + logger.Infof("error from balancer.UpdateClientConnState: %v", err) } - errCh <- ccb.balancer.UpdateClientConnState(*ccs) + errCh <- err }) if !ok { - // If we are unable to schedule a function with the serializer, it - // indicates that it has been closed. A serializer is only closed when - // the wrapper is closed or is in idle. - ccb.mu.Unlock() - return fmt.Errorf("grpc: cannot send state update to a closed or idle balancer") - } - ccb.mu.Unlock() - - // We get here only if the above call to Schedule succeeds, in which case it - // is guaranteed that the scheduled function will run. Therefore it is safe - // to block on this channel. 
- err := <-errCh - if logger.V(2) && err != nil { - logger.Infof("error from balancer.UpdateClientConnState: %v", err) + return nil } - return err -} - -// updateSubConnState is invoked by grpc to push a subConn state update to the -// underlying balancer. -func (ccb *ccBalancerWrapper) updateSubConnState(sc balancer.SubConn, s connectivity.State, err error) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { - ccb.balancer.UpdateSubConnState(sc, balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) - }) - ccb.mu.Unlock() + return <-errCh } +// resolverError is invoked by grpc to push a resolver error to the underlying +// balancer. The call to the balancer is executed from the serializer. func (ccb *ccBalancerWrapper) resolverError(err error) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } ccb.balancer.ResolverError(err) }) - ccb.mu.Unlock() } // switchTo is invoked by grpc to instruct the balancer wrapper to switch to the @@ -163,8 +131,10 @@ func (ccb *ccBalancerWrapper) resolverError(err error) { // the ccBalancerWrapper keeps track of the current LB policy name, and skips // the graceful balancer switching process if the name does not change. func (ccb *ccBalancerWrapper) switchTo(name string) { - ccb.mu.Lock() - ccb.serializer.Schedule(func(_ context.Context) { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } // TODO: Other languages use case-sensitive balancer registries. We should // switch as well. See: https://github.com/grpc/grpc-go/issues/5288. 
if strings.EqualFold(ccb.curBalancerName, name) { @@ -172,7 +142,6 @@ func (ccb *ccBalancerWrapper) switchTo(name string) { } ccb.buildLoadBalancingPolicy(name) }) - ccb.mu.Unlock() } // buildLoadBalancingPolicy performs the following: @@ -199,151 +168,69 @@ func (ccb *ccBalancerWrapper) buildLoadBalancingPolicy(name string) { ccb.curBalancerName = builder.Name() } +// close initiates async shutdown of the wrapper. cc.mu must be held when +// calling this function. To determine the wrapper has finished shutting down, +// the channel should block on ccb.serializer.Done() without cc.mu held. func (ccb *ccBalancerWrapper) close() { - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") - ccb.closeBalancer(ccbModeClosed) -} - -// enterIdleMode is invoked by grpc when the channel enters idle mode upon -// expiry of idle_timeout. This call blocks until the balancer is closed. -func (ccb *ccBalancerWrapper) enterIdleMode() { - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: entering idle mode") - ccb.closeBalancer(ccbModeIdle) -} - -// closeBalancer is invoked when the channel is being closed or when it enters -// idle mode upon expiry of idle_timeout. -func (ccb *ccBalancerWrapper) closeBalancer(m ccbMode) { ccb.mu.Lock() - if ccb.mode == ccbModeClosed || ccb.mode == ccbModeIdle { - ccb.mu.Unlock() - return - } - - ccb.mode = m - done := ccb.serializer.Done - b := ccb.balancer - ok := ccb.serializer.Schedule(func(_ context.Context) { - // Close the serializer to ensure that no more calls from gRPC are sent - // to the balancer. - ccb.serializerCancel() - // Empty the current balancer name because we don't have a balancer - // anymore and also so that we act on the next call to switchTo by - // creating a new balancer specified by the new resolver. - ccb.curBalancerName = "" - }) - if !ok { - ccb.mu.Unlock() - return - } + ccb.closed = true ccb.mu.Unlock() - - // Give enqueued callbacks a chance to finish. 
- <-done - // Spawn a goroutine to close the balancer (since it may block trying to - // cleanup all allocated resources) and return early. - go b.Close() -} - -// exitIdleMode is invoked by grpc when the channel exits idle mode either -// because of an RPC or because of an invocation of the Connect() API. This -// recreates the balancer that was closed previously when entering idle mode. -// -// If the channel is not in idle mode, we know for a fact that we are here as a -// result of the user calling the Connect() method on the ClientConn. In this -// case, we can simply forward the call to the underlying balancer, instructing -// it to reconnect to the backends. -func (ccb *ccBalancerWrapper) exitIdleMode() { - ccb.mu.Lock() - if ccb.mode == ccbModeClosed { - // Request to exit idle is a no-op when wrapper is already closed. - ccb.mu.Unlock() - return - } - - if ccb.mode == ccbModeIdle { - // Recreate the serializer which was closed when we entered idle. - ctx, cancel := context.WithCancel(context.Background()) - ccb.serializer = grpcsync.NewCallbackSerializer(ctx) - ccb.serializerCancel = cancel - } - - // The ClientConn guarantees that mutual exclusion between close() and - // exitIdleMode(), and since we just created a new serializer, we can be - // sure that the below function will be scheduled. - done := make(chan struct{}) - ccb.serializer.Schedule(func(_ context.Context) { - defer close(done) - - ccb.mu.Lock() - defer ccb.mu.Unlock() - - if ccb.mode != ccbModeIdle { - ccb.balancer.ExitIdle() + channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: closing") + ccb.serializer.Schedule(func(context.Context) { + if ccb.balancer == nil { return } - - // Gracefulswitch balancer does not support a switchTo operation after - // being closed. Hence we need to create a new one here. 
- ccb.balancer = gracefulswitch.NewBalancer(ccb, ccb.opts) - ccb.mode = ccbModeActive - channelz.Info(logger, ccb.cc.channelzID, "ccBalancerWrapper: exiting idle mode") - + ccb.balancer.Close() + ccb.balancer = nil }) - ccb.mu.Unlock() - - <-done + ccb.serializerCancel() } -func (ccb *ccBalancerWrapper) isIdleOrClosed() bool { - ccb.mu.Lock() - defer ccb.mu.Unlock() - return ccb.mode == ccbModeIdle || ccb.mode == ccbModeClosed +// exitIdle invokes the balancer's exitIdle method in the serializer. +func (ccb *ccBalancerWrapper) exitIdle() { + ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccb.balancer == nil { + return + } + ccb.balancer.ExitIdle() + }) } func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { - if ccb.isIdleOrClosed() { - return nil, fmt.Errorf("grpc: cannot create SubConn when balancer is closed or idle") + ccb.cc.mu.Lock() + defer ccb.cc.mu.Unlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() + return nil, fmt.Errorf("balancer is being closed; no new SubConns allowed") } + ccb.mu.Unlock() if len(addrs) == 0 { return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") } - ac, err := ccb.cc.newAddrConn(addrs, opts) + ac, err := ccb.cc.newAddrConnLocked(addrs, opts) if err != nil { channelz.Warningf(logger, ccb.cc.channelzID, "acBalancerWrapper: NewSubConn: failed to newAddrConn: %v", err) return nil, err } - acbw := &acBalancerWrapper{ac: ac, producers: make(map[balancer.ProducerBuilder]*refCountedProducer)} + acbw := &acBalancerWrapper{ + ccb: ccb, + ac: ac, + producers: make(map[balancer.ProducerBuilder]*refCountedProducer), + stateListener: opts.StateListener, + } ac.acbw = acbw return acbw, nil } func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { - if ccb.isIdleOrClosed() { - // It it safe to ignore this call when the balancer is closed or in idle - // because the ClientConn takes care of closing the 
connections. - // - // Not returning early from here when the balancer is closed or in idle - // leads to a deadlock though, because of the following sequence of - // calls when holding cc.mu: - // cc.exitIdleMode --> ccb.enterIdleMode --> gsw.Close --> - // ccb.RemoveAddrConn --> cc.removeAddrConn - return - } - - acbw, ok := sc.(*acBalancerWrapper) - if !ok { - return - } - ccb.cc.removeAddrConn(acbw.ac, errConnDrain) + // The graceful switch balancer will never call this. + logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc") } func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { - if ccb.isIdleOrClosed() { - return - } - acbw, ok := sc.(*acBalancerWrapper) if !ok { return @@ -352,25 +239,39 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol } func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) { - if ccb.isIdleOrClosed() { + ccb.cc.mu.Lock() + defer ccb.cc.mu.Unlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() return } - + ccb.mu.Unlock() // Update picker before updating state. Even though the ordering here does // not matter, it can lead to multiple calls of Pick in the common start-up // case where we wait for ready and then perform an RPC. If the picker is // updated later, we could call the "connecting" picker when the state is // updated, and then call the "ready" picker after the picker gets updated. - ccb.cc.blockingpicker.updatePicker(s.Picker) + + // Note that there is no need to check if the balancer wrapper was closed, + // as we know the graceful switch LB policy will not call cc if it has been + // closed. 
+ ccb.cc.pickerWrapper.updatePicker(s.Picker) ccb.cc.csMgr.updateState(s.ConnectivityState) } func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOptions) { - if ccb.isIdleOrClosed() { + ccb.cc.mu.RLock() + defer ccb.cc.mu.RUnlock() + + ccb.mu.Lock() + if ccb.closed { + ccb.mu.Unlock() return } - - ccb.cc.resolveNow(o) + ccb.mu.Unlock() + ccb.cc.resolveNowLocked(o) } func (ccb *ccBalancerWrapper) Target() string { @@ -380,12 +281,28 @@ func (ccb *ccBalancerWrapper) Target() string { // acBalancerWrapper is a wrapper on top of ac for balancers. // It implements balancer.SubConn interface. type acBalancerWrapper struct { - ac *addrConn // read-only + ac *addrConn // read-only + ccb *ccBalancerWrapper // read-only + stateListener func(balancer.SubConnState) mu sync.Mutex producers map[balancer.ProducerBuilder]*refCountedProducer } +// updateState is invoked by grpc to push a subConn state update to the +// underlying balancer. +func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) { + acbw.ccb.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || acbw.ccb.balancer == nil { + return + } + // Even though it is optional for balancers, gracefulswitch ensures + // opts.StateListener is set, so this cannot ever be nil. + // TODO: delete this comment when UpdateSubConnState is removed. + acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err}) + }) +} + func (acbw *acBalancerWrapper) String() string { return fmt.Sprintf("SubConn(id:%d)", acbw.ac.channelzID.Int()) } @@ -398,6 +315,10 @@ func (acbw *acBalancerWrapper) Connect() { go acbw.ac.connect() } +func (acbw *acBalancerWrapper) Shutdown() { + acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain) +} + // NewStream begins a streaming RPC on the addrConn. If the addrConn is not // ready, blocks until it is or ctx expires. Returns an error when the context // expires or the addrConn is shut down. 
@@ -411,7 +332,7 @@ func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, // Invoke performs a unary RPC. If the addrConn is not ready, returns // errSubConnNotReady. -func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error { +func (acbw *acBalancerWrapper) Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error { cs, err := acbw.NewStream(ctx, unaryStreamDesc, method, opts...) if err != nil { return err diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/.ci/providerlint/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go index ec2c2fa14dd..5954801122a 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/binlog/v1/binarylog.proto diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/call.go b/.ci/providerlint/vendor/google.golang.org/grpc/call.go index e6a1dc5d75e..788c89c16f9 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/call.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/call.go @@ -26,12 +26,7 @@ import ( // received. This is typically called by generated code. // // All errors returned by Invoke are compatible with the status package. 
-func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { - if err := cc.idlenessMgr.onCallBegin(); err != nil { - return err - } - defer cc.idlenessMgr.onCallEnd() - +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply any, opts ...CallOption) error { // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -61,13 +56,13 @@ func combine(o1 []CallOption, o2 []CallOption) []CallOption { // received. This is typically called by generated code. // // DEPRECATED: Use ClientConn.Invoke instead. -func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func Invoke(ctx context.Context, method string, args, reply any, cc *ClientConn, opts ...CallOption) error { return cc.Invoke(ctx, method, args, reply, opts...) } var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} -func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { +func invoke(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { cs, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) 
if err != nil { return err diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/clientconn.go b/.ci/providerlint/vendor/google.golang.org/grpc/clientconn.go index bfd7555a8bf..e6f2625b684 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/clientconn.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/clientconn.go @@ -33,10 +33,10 @@ import ( "google.golang.org/grpc/balancer/base" "google.golang.org/grpc/codes" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/internal/backoff" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/idle" "google.golang.org/grpc/internal/pretty" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/transport" @@ -46,16 +46,14 @@ import ( "google.golang.org/grpc/status" _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. - _ "google.golang.org/grpc/internal/resolver/dns" // To register dns resolver. _ "google.golang.org/grpc/internal/resolver/passthrough" // To register passthrough resolver. _ "google.golang.org/grpc/internal/resolver/unix" // To register unix resolver. + _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. ) const ( // minimum time to give a connection to complete minConnectTimeout = 20 * time.Second - // must match grpclbName in grpclb/grpclb.go - grpclbName = "grpclb" ) var ( @@ -119,48 +117,20 @@ func (dcs *defaultConfigSelector) SelectConfig(rpcInfo iresolver.RPCInfo) (*ires }, nil } -// DialContext creates a client connection to the given target. By default, it's -// a non-blocking dial (the function won't wait for connections to be -// established, and connecting happens in the background). To make it a blocking -// dial, use WithBlock() dial option. -// -// In the non-blocking case, the ctx does not act against the connection. It -// only controls the setup steps. 
-// -// In the blocking case, ctx can be used to cancel or expire the pending -// connection. Once this function returns, the cancellation and expiration of -// ctx will be noop. Users should call ClientConn.Close to terminate all the -// pending operations after this function returns. -// -// The target name syntax is defined in -// https://github.com/grpc/grpc/blob/master/doc/naming.md. -// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. -func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { +// newClient returns a new client in idle mode. +func newClient(target string, opts ...DialOption) (conn *ClientConn, err error) { cc := &ClientConn{ target: target, - csMgr: &connectivityStateManager{}, conns: make(map[*addrConn]struct{}), dopts: defaultDialOptions(), czData: new(channelzData), } - // We start the channel off in idle mode, but kick it out of idle at the end - // of this method, instead of waiting for the first RPC. Other gRPC - // implementations do wait for the first RPC to kick the channel out of - // idle. But doing so would be a major behavior change for our users who are - // used to seeing the channel active after Dial. - // - // Taking this approach of kicking it out of idle at the end of this method - // allows us to share the code between channel creation and exiting idle - // mode. This will also make it easy for us to switch to starting the - // channel off in idle, if at all we ever get to do that. - cc.idlenessState = ccIdlenessStateIdle - cc.retryThrottler.Store((*retryThrottler)(nil)) cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) cc.ctx, cc.cancel = context.WithCancel(context.Background()) - cc.exitIdleCond = sync.NewCond(&cc.mu) + // Apply dial options. 
disableGlobalOpts := false for _, opt := range opts { if _, ok := opt.(*disableGlobalDialOptions); ok { @@ -178,19 +148,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * for _, opt := range opts { opt.apply(&cc.dopts) } - chainUnaryClientInterceptors(cc) chainStreamClientInterceptors(cc) - defer func() { - if err != nil { - cc.Close() - } - }() - - // Register ClientConn with channelz. - cc.channelzRegistration(target) - if err := cc.validateTransportCredentials(); err != nil { return nil, err } @@ -204,10 +164,80 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } cc.mkp = cc.dopts.copts.KeepaliveParams - if cc.dopts.copts.UserAgent != "" { - cc.dopts.copts.UserAgent += " " + grpcUA - } else { - cc.dopts.copts.UserAgent = grpcUA + // Register ClientConn with channelz. + cc.channelzRegistration(target) + + // TODO: Ideally it should be impossible to error from this function after + // channelz registration. This will require removing some channelz logs + // from the following functions that can error. Errors can be returned to + // the user, and successful logs can be emitted here, after the checks have + // passed and channelz is subsequently registered. + + // Determine the resolver to use. + if err := cc.parseTargetAndFindResolver(); err != nil { + channelz.RemoveEntry(cc.channelzID) + return nil, err + } + if err = cc.determineAuthority(); err != nil { + channelz.RemoveEntry(cc.channelzID) + return nil, err + } + + cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelzID) + cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers) + + cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc. + cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout) + return cc, nil +} + +// DialContext creates a client connection to the given target. 
By default, it's +// a non-blocking dial (the function won't wait for connections to be +// established, and connecting happens in the background). To make it a blocking +// dial, use WithBlock() dial option. +// +// In the non-blocking case, the ctx does not act against the connection. It +// only controls the setup steps. +// +// In the blocking case, ctx can be used to cancel or expire the pending +// connection. Once this function returns, the cancellation and expiration of +// ctx will be noop. Users should call ClientConn.Close to terminate all the +// pending operations after this function returns. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. +func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc, err := newClient(target, opts...) + if err != nil { + return nil, err + } + + // We start the channel off in idle mode, but kick it out of idle now, + // instead of waiting for the first RPC. Other gRPC implementations do wait + // for the first RPC to kick the channel out of idle. But doing so would be + // a major behavior change for our users who are used to seeing the channel + // active after Dial. + // + // Taking this approach of kicking it out of idle at the end of this method + // allows us to share the code between channel creation and exiting idle + // mode. This will also make it easy for us to switch to starting the + // channel off in idle, i.e. by making newClient exported. + + defer func() { + if err != nil { + cc.Close() + } + }() + + // This creates the name resolver, load balancer, etc. + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + return nil, err + } + + // Return now for non-blocking dials. 
+ if !cc.dopts.block { + return cc, nil } if cc.dopts.timeout > 0 { @@ -230,49 +260,6 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn * } }() - if cc.dopts.bs == nil { - cc.dopts.bs = backoff.DefaultExponential - } - - // Determine the resolver to use. - if err := cc.parseTargetAndFindResolver(); err != nil { - return nil, err - } - if err = cc.determineAuthority(); err != nil { - return nil, err - } - - if cc.dopts.scChan != nil { - // Blocking wait for the initial service config. - select { - case sc, ok := <-cc.dopts.scChan: - if ok { - cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - } - case <-ctx.Done(): - return nil, ctx.Err() - } - } - if cc.dopts.scChan != nil { - go cc.scWatcher() - } - - // This creates the name resolver, load balancer, blocking picker etc. - if err := cc.exitIdleMode(); err != nil { - return nil, err - } - - // Configure idleness support with configured idle timeout or default idle - // timeout duration. Idleness can be explicitly disabled by the user, by - // setting the dial option to 0. - cc.idlenessMgr = newIdlenessManager(cc, cc.dopts.idleTimeout) - - // Return early for non-blocking dials. - if !cc.dopts.block { - return cc, nil - } - // A blocking dial blocks until the clientConn is ready. for { s := cc.GetState() @@ -317,117 +304,82 @@ func (cc *ClientConn) addTraceEvent(msg string) { channelz.AddTraceEvent(logger, cc.channelzID, 0, ted) } +type idler ClientConn + +func (i *idler) EnterIdleMode() { + (*ClientConn)(i).enterIdleMode() +} + +func (i *idler) ExitIdleMode() error { + return (*ClientConn)(i).exitIdleMode() +} + // exitIdleMode moves the channel out of idle mode by recreating the name -// resolver and load balancer. -func (cc *ClientConn) exitIdleMode() error { +// resolver and load balancer. This should never be called directly; use +// cc.idlenessMgr.ExitIdleMode instead. 
+func (cc *ClientConn) exitIdleMode() (err error) { cc.mu.Lock() if cc.conns == nil { cc.mu.Unlock() return errConnClosing } - if cc.idlenessState != ccIdlenessStateIdle { - cc.mu.Unlock() - logger.Info("ClientConn asked to exit idle mode when not in idle mode") - return nil - } - - defer func() { - // When Close() and exitIdleMode() race against each other, one of the - // following two can happen: - // - Close() wins the race and runs first. exitIdleMode() runs after, and - // sees that the ClientConn is already closed and hence returns early. - // - exitIdleMode() wins the race and runs first and recreates the balancer - // and releases the lock before recreating the resolver. If Close() runs - // in this window, it will wait for exitIdleMode to complete. - // - // We achieve this synchronization using the below condition variable. - cc.mu.Lock() - cc.idlenessState = ccIdlenessStateActive - cc.exitIdleCond.Signal() - cc.mu.Unlock() - }() - - cc.idlenessState = ccIdlenessStateExitingIdle - exitedIdle := false - if cc.blockingpicker == nil { - cc.blockingpicker = newPickerWrapper() - } else { - cc.blockingpicker.exitIdleMode() - exitedIdle = true - } - - var credsClone credentials.TransportCredentials - if creds := cc.dopts.copts.TransportCredentials; creds != nil { - credsClone = creds.Clone() - } - if cc.balancerWrapper == nil { - cc.balancerWrapper = newCCBalancerWrapper(cc, balancer.BuildOptions{ - DialCreds: credsClone, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - Authority: cc.authority, - CustomUserAgent: cc.dopts.copts.UserAgent, - ChannelzParentID: cc.channelzID, - Target: cc.parsedTarget, - }) - } else { - cc.balancerWrapper.exitIdleMode() - } - cc.firstResolveEvent = grpcsync.NewEvent() cc.mu.Unlock() // This needs to be called without cc.mu because this builds a new resolver - // which might update state or report error inline which needs to be handled - // by cc.updateResolverState() which also grabs cc.mu. 
- if err := cc.initResolverWrapper(credsClone); err != nil { + // which might update state or report error inline, which would then need to + // acquire cc.mu. + if err := cc.resolverWrapper.start(); err != nil { return err } - if exitedIdle { - cc.addTraceEvent("exiting idle mode") - } + cc.addTraceEvent("exiting idle mode") return nil } +// initIdleStateLocked initializes common state to how it should be while idle. +func (cc *ClientConn) initIdleStateLocked() { + cc.resolverWrapper = newCCResolverWrapper(cc) + cc.balancerWrapper = newCCBalancerWrapper(cc) + cc.firstResolveEvent = grpcsync.NewEvent() + // cc.conns == nil is a proxy for the ClientConn being closed. So, instead + // of setting it to nil here, we recreate the map. This also means that we + // don't have to do this when exiting idle mode. + cc.conns = make(map[*addrConn]struct{}) +} + // enterIdleMode puts the channel in idle mode, and as part of it shuts down the -// name resolver, load balancer and any subchannels. -func (cc *ClientConn) enterIdleMode() error { +// name resolver, load balancer, and any subchannels. This should never be +// called directly; use cc.idlenessMgr.EnterIdleMode instead. +func (cc *ClientConn) enterIdleMode() { cc.mu.Lock() + if cc.conns == nil { cc.mu.Unlock() - return ErrClientConnClosing - } - if cc.idlenessState != ccIdlenessStateActive { - logger.Error("ClientConn asked to enter idle mode when not active") - return nil + return } - // cc.conns == nil is a proxy for the ClientConn being closed. So, instead - // of setting it to nil here, we recreate the map. This also means that we - // don't have to do this when exiting idle mode. conns := cc.conns - cc.conns = make(map[*addrConn]struct{}) - // TODO: Currently, we close the resolver wrapper upon entering idle mode - // and create a new one upon exiting idle mode. This means that the - // `cc.resolverWrapper` field would be overwritten everytime we exit idle - // mode. 
While this means that we need to hold `cc.mu` when accessing - // `cc.resolverWrapper`, it makes the code simpler in the wrapper. We should - // try to do the same for the balancer and picker wrappers too. - cc.resolverWrapper.close() - cc.blockingpicker.enterIdleMode() - cc.balancerWrapper.enterIdleMode() + rWrapper := cc.resolverWrapper + rWrapper.close() + cc.pickerWrapper.reset() + bWrapper := cc.balancerWrapper + bWrapper.close() cc.csMgr.updateState(connectivity.Idle) - cc.idlenessState = ccIdlenessStateIdle + cc.addTraceEvent("entering idle mode") + + cc.initIdleStateLocked() + cc.mu.Unlock() - go func() { - cc.addTraceEvent("entering idle mode") - for ac := range conns { - ac.tearDown(errConnIdling) - } - }() - return nil + // Block until the name resolver and LB policy are closed. + <-rWrapper.serializer.Done() + <-bWrapper.serializer.Done() + + // Close all subchannels after the LB policy is closed. + for ac := range conns { + ac.tearDown(errConnIdling) + } } // validateTransportCredentials performs a series of checks on the configured @@ -475,7 +427,6 @@ func (cc *ClientConn) validateTransportCredentials() error { func (cc *ClientConn) channelzRegistration(target string) { cc.channelzID = channelz.RegisterChannel(&channelzChannel{cc}, cc.dopts.channelzParentID, target) cc.addTraceEvent("created") - cc.csMgr.channelzID = cc.channelzID } // chainUnaryClientInterceptors chains all unary client interceptors into one. @@ -492,7 +443,7 @@ func chainUnaryClientInterceptors(cc *ClientConn) { } else if len(interceptors) == 1 { chainedInt = interceptors[0] } else { - chainedInt = func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { + chainedInt = func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error { return interceptors[0](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, 0, invoker), opts...) 
} } @@ -504,7 +455,7 @@ func getChainUnaryInvoker(interceptors []UnaryClientInterceptor, curr int, final if curr == len(interceptors)-1 { return finalInvoker } - return func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error { return interceptors[curr+1](ctx, method, req, reply, cc, getChainUnaryInvoker(interceptors, curr+1, finalInvoker), opts...) } } @@ -540,13 +491,27 @@ func getChainStreamer(interceptors []StreamClientInterceptor, curr int, finalStr } } +// newConnectivityStateManager creates an connectivityStateManager with +// the specified id. +func newConnectivityStateManager(ctx context.Context, id *channelz.Identifier) *connectivityStateManager { + return &connectivityStateManager{ + channelzID: id, + pubSub: grpcsync.NewPubSub(ctx), + } +} + // connectivityStateManager keeps the connectivity.State of ClientConn. // This struct will eventually be exported so the balancers can access it. +// +// TODO: If possible, get rid of the `connectivityStateManager` type, and +// provide this functionality using the `PubSub`, to avoid keeping track of +// the connectivity state at two places. type connectivityStateManager struct { mu sync.Mutex state connectivity.State notifyChan chan struct{} channelzID *channelz.Identifier + pubSub *grpcsync.PubSub } // updateState updates the connectivity.State of ClientConn. @@ -562,6 +527,8 @@ func (csm *connectivityStateManager) updateState(state connectivity.State) { return } csm.state = state + csm.pubSub.Publish(state) + channelz.Infof(logger, csm.channelzID, "Channel Connectivity change to %v", state) if csm.notifyChan != nil { // There are other goroutines waiting on this channel. 
@@ -591,7 +558,7 @@ func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { type ClientConnInterface interface { // Invoke performs a unary RPC and returns after the response is received // into reply. - Invoke(ctx context.Context, method string, args interface{}, reply interface{}, opts ...CallOption) error + Invoke(ctx context.Context, method string, args any, reply any, opts ...CallOption) error // NewStream begins a streaming RPC. NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) } @@ -622,53 +589,35 @@ type ClientConn struct { dopts dialOptions // Default and user specified dial options. channelzID *channelz.Identifier // Channelz identifier for the channel. resolverBuilder resolver.Builder // See parseTargetAndFindResolver(). - balancerWrapper *ccBalancerWrapper // Uses gracefulswitch.balancer underneath. - idlenessMgr idlenessManager + idlenessMgr *idle.Manager // The following provide their own synchronization, and therefore don't // require cc.mu to be held to access them. csMgr *connectivityStateManager - blockingpicker *pickerWrapper + pickerWrapper *pickerWrapper safeConfigSelector iresolver.SafeConfigSelector czData *channelzData retryThrottler atomic.Value // Updated from service config. - // firstResolveEvent is used to track whether the name resolver sent us at - // least one update. RPCs block on this event. - firstResolveEvent *grpcsync.Event - // mu protects the following fields. // TODO: split mu so the same mutex isn't used for everything. mu sync.RWMutex - resolverWrapper *ccResolverWrapper // Initialized in Dial; cleared in Close. + resolverWrapper *ccResolverWrapper // Always recreated whenever entering idle to simplify Close. + balancerWrapper *ccBalancerWrapper // Always recreated whenever entering idle to simplify Close. sc *ServiceConfig // Latest service config received from the resolver. conns map[*addrConn]struct{} // Set to nil on close. 
mkp keepalive.ClientParameters // May be updated upon receipt of a GoAway. - idlenessState ccIdlenessState // Tracks idleness state of the channel. - exitIdleCond *sync.Cond // Signalled when channel exits idle. + // firstResolveEvent is used to track whether the name resolver sent us at + // least one update. RPCs block on this event. May be accessed without mu + // if we know we cannot be asked to enter idle mode while accessing it (e.g. + // when the idle manager has already been closed, or if we are already + // entering idle mode). + firstResolveEvent *grpcsync.Event lceMu sync.Mutex // protects lastConnectionError lastConnectionError error } -// ccIdlenessState tracks the idleness state of the channel. -// -// Channels start off in `active` and move to `idle` after a period of -// inactivity. When moving back to `active` upon an incoming RPC, they -// transition through `exiting_idle`. This state is useful for synchronization -// with Close(). -// -// This state tracking is mostly for self-protection. The idlenessManager is -// expected to keep track of the state as well, and is expected not to call into -// the ClientConn unnecessarily. -type ccIdlenessState int8 - -const ( - ccIdlenessStateActive ccIdlenessState = iota - ccIdlenessStateIdle - ccIdlenessStateExitingIdle -) - // WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or // ctx expires. A true value is returned in former case and false in latter. // @@ -708,29 +657,15 @@ func (cc *ClientConn) GetState() connectivity.State { // Notice: This API is EXPERIMENTAL and may be changed or removed in a later // release. func (cc *ClientConn) Connect() { - cc.exitIdleMode() + if err := cc.idlenessMgr.ExitIdleMode(); err != nil { + cc.addTraceEvent(err.Error()) + return + } // If the ClientConn was not in idle mode, we need to call ExitIdle on the // LB policy so that connections can be created. 
- cc.balancerWrapper.exitIdleMode() -} - -func (cc *ClientConn) scWatcher() { - for { - select { - case sc, ok := <-cc.dopts.scChan: - if !ok { - return - } - cc.mu.Lock() - // TODO: load balance policy runtime change is ignored. - // We may revisit this decision in the future. - cc.sc = &sc - cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{&sc}) - cc.mu.Unlock() - case <-cc.ctx.Done(): - return - } - } + cc.mu.Lock() + cc.balancerWrapper.exitIdle() + cc.mu.Unlock() } // waitForResolvedAddrs blocks until the resolver has provided addresses or the @@ -760,6 +695,16 @@ func init() { panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err)) } emptyServiceConfig = cfg.Config.(*ServiceConfig) + + internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { + return cc.csMgr.pubSub.Subscribe(s) + } + internal.EnterIdleModeForTesting = func(cc *ClientConn) { + cc.idlenessMgr.EnterIdleModeForTesting() + } + internal.ExitIdleModeForTesting = func(cc *ClientConn) error { + return cc.idlenessMgr.ExitIdleMode() + } } func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { @@ -774,9 +719,8 @@ func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { } } -func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { +func (cc *ClientConn) updateResolverStateAndUnlock(s resolver.State, err error) error { defer cc.firstResolveEvent.Fire() - cc.mu.Lock() // Check if the ClientConn is already closed. Some fields (e.g. // balancerWrapper) are set to nil when closing the ClientConn, and could // cause nil pointer panic if we don't have this check. @@ -822,7 +766,7 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { if cc.sc == nil { // Apply the failing LB only if we haven't received valid service config // from the name resolver in the past. 
- cc.applyFailingLB(s.ServiceConfig) + cc.applyFailingLBLocked(s.ServiceConfig) cc.mu.Unlock() return ret } @@ -844,15 +788,13 @@ func (cc *ClientConn) updateResolverState(s resolver.State, err error) error { return ret } -// applyFailingLB is akin to configuring an LB policy on the channel which +// applyFailingLBLocked is akin to configuring an LB policy on the channel which // always fails RPCs. Here, an actual LB policy is not configured, but an always // erroring picker is configured, which returns errors with information about // what was invalid in the received service config. A config selector with no // service config is configured, and the connectivity state of the channel is // set to TransientFailure. -// -// Caller must hold cc.mu. -func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { +func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) { var err error if sc.Err != nil { err = status.Errorf(codes.Unavailable, "error parsing service config: %v", sc.Err) @@ -860,14 +802,10 @@ func (cc *ClientConn) applyFailingLB(sc *serviceconfig.ParseResult) { err = status.Errorf(codes.Unavailable, "illegal service config type: %T", sc.Config) } cc.safeConfigSelector.UpdateConfigSelector(&defaultConfigSelector{nil}) - cc.blockingpicker.updatePicker(base.NewErrPicker(err)) + cc.pickerWrapper.updatePicker(base.NewErrPicker(err)) cc.csMgr.updateState(connectivity.TransientFailure) } -func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State, err error) { - cc.balancerWrapper.updateSubConnState(sc, s, err) -} - // Makes a copy of the input addresses slice and clears out the balancer // attributes field. Addresses are passed during subconn creation and address // update operations. 
In both cases, we will clear the balancer attributes by @@ -882,10 +820,14 @@ func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Ad return out } -// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +// newAddrConnLocked creates an addrConn for addrs and adds it to cc.conns. // // Caller needs to make sure len(addrs) > 0. -func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { +func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.NewSubConnOptions) (*addrConn, error) { + if cc.conns == nil { + return nil, ErrClientConnClosing + } + ac := &addrConn{ state: connectivity.Idle, cc: cc, @@ -897,12 +839,6 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub stateChan: make(chan struct{}), } ac.ctx, ac.cancel = context.WithCancel(cc.ctx) - // Track ac in cc. This needs to be done before any getTransport(...) is called. - cc.mu.Lock() - defer cc.mu.Unlock() - if cc.conns == nil { - return nil, ErrClientConnClosing - } var err error ac.channelzID, err = channelz.RegisterSubChannel(ac, cc.channelzID, "") @@ -918,6 +854,7 @@ func (cc *ClientConn) newAddrConn(addrs []resolver.Address, opts balancer.NewSub }, }) + // Track ac in cc. This needs to be done before any getTransport(...) is called. cc.conns[ac] = struct{}{} return ac, nil } @@ -1047,8 +984,8 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) { ac.cancel() ac.ctx, ac.cancel = context.WithCancel(ac.cc.ctx) - // We have to defer here because GracefulClose => Close => onClose, which - // requires locking ac.mu. + // We have to defer here because GracefulClose => onClose, which requires + // locking ac.mu. 
if ac.transport != nil { defer ac.transport.GracefulClose() ac.transport = nil @@ -1124,7 +1061,7 @@ func (cc *ClientConn) healthCheckConfig() *healthCheckConfig { } func (cc *ClientConn) getTransport(ctx context.Context, failfast bool, method string) (transport.ClientTransport, balancer.PickResult, error) { - return cc.blockingpicker.pick(ctx, failfast, balancer.PickInfo{ + return cc.pickerWrapper.pick(ctx, failfast, balancer.PickInfo{ Ctx: ctx, FullMethodName: method, }) @@ -1153,35 +1090,25 @@ func (cc *ClientConn) applyServiceConfigAndBalancer(sc *ServiceConfig, configSel } var newBalancerName string - if cc.sc != nil && cc.sc.lbConfig != nil { + if cc.sc == nil || (cc.sc.lbConfig == nil && cc.sc.LB == nil) { + // No service config or no LB policy specified in config. + newBalancerName = PickFirstBalancerName + } else if cc.sc.lbConfig != nil { newBalancerName = cc.sc.lbConfig.name - } else { - var isGRPCLB bool - for _, a := range addrs { - if a.Type == resolver.GRPCLB { - isGRPCLB = true - break - } - } - if isGRPCLB { - newBalancerName = grpclbName - } else if cc.sc != nil && cc.sc.LB != nil { - newBalancerName = *cc.sc.LB - } else { - newBalancerName = PickFirstBalancerName - } + } else { // cc.sc.LB != nil + newBalancerName = *cc.sc.LB } cc.balancerWrapper.switchTo(newBalancerName) } func (cc *ClientConn) resolveNow(o resolver.ResolveNowOptions) { cc.mu.RLock() - r := cc.resolverWrapper + cc.resolverWrapper.resolveNow(o) cc.mu.RUnlock() - if r == nil { - return - } - go r.resolveNow(o) +} + +func (cc *ClientConn) resolveNowLocked(o resolver.ResolveNowOptions) { + cc.resolverWrapper.resolveNow(o) } // ResetConnectBackoff wakes up all subchannels in transient failure and causes @@ -1208,7 +1135,14 @@ func (cc *ClientConn) ResetConnectBackoff() { // Close tears down the ClientConn and all underlying connections. 
func (cc *ClientConn) Close() error { - defer cc.cancel() + defer func() { + cc.cancel() + <-cc.csMgr.pubSub.Done() + }() + + // Prevent calls to enter/exit idle immediately, and ensure we are not + // currently entering/exiting idle mode. + cc.idlenessMgr.Close() cc.mu.Lock() if cc.conns == nil { @@ -1216,34 +1150,22 @@ func (cc *ClientConn) Close() error { return ErrClientConnClosing } - for cc.idlenessState == ccIdlenessStateExitingIdle { - cc.exitIdleCond.Wait() - } - conns := cc.conns cc.conns = nil cc.csMgr.updateState(connectivity.Shutdown) - pWrapper := cc.blockingpicker - rWrapper := cc.resolverWrapper - bWrapper := cc.balancerWrapper - idlenessMgr := cc.idlenessMgr + // We can safely unlock and continue to access all fields now as + // cc.conns==nil, preventing any further operations on cc. cc.mu.Unlock() + cc.resolverWrapper.close() // The order of closing matters here since the balancer wrapper assumes the // picker is closed before it is closed. - if pWrapper != nil { - pWrapper.close() - } - if bWrapper != nil { - bWrapper.close() - } - if rWrapper != nil { - rWrapper.close() - } - if idlenessMgr != nil { - idlenessMgr.close() - } + cc.pickerWrapper.close() + cc.balancerWrapper.close() + + <-cc.resolverWrapper.serializer.Done() + <-cc.balancerWrapper.serializer.Done() for ac := range conns { ac.tearDown(ErrClientConnClosing) @@ -1264,7 +1186,7 @@ type addrConn struct { cc *ClientConn dopts dialOptions - acbw balancer.SubConn + acbw *acBalancerWrapper scopts balancer.NewSubConnOptions // transport is set when there's a viable transport (note: ac state may not be READY as LB channel @@ -1302,7 +1224,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error) } else { channelz.Infof(logger, ac.channelzID, "Subchannel Connectivity change to %v, last error: %s", s, lastErr) } - ac.cc.handleSubConnStateChange(ac.acbw, s, lastErr) + ac.acbw.updateState(s, lastErr) } // adjustParams updates parameters used to create transports upon 
@@ -1352,12 +1274,14 @@ func (ac *addrConn) resetTransport() { if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil { ac.cc.resolveNow(resolver.ResolveNowOptions{}) - // After exhausting all addresses, the addrConn enters - // TRANSIENT_FAILURE. + ac.mu.Lock() if acCtx.Err() != nil { + // addrConn was torn down. + ac.mu.Unlock() return } - ac.mu.Lock() + // After exhausting all addresses, the addrConn enters + // TRANSIENT_FAILURE. ac.updateConnectivityState(connectivity.TransientFailure, err) // Backoff. @@ -1553,7 +1477,7 @@ func (ac *addrConn) startHealthCheck(ctx context.Context) { // Set up the health check helper functions. currentTr := ac.transport - newStream := func(method string) (interface{}, error) { + newStream := func(method string) (any, error) { ac.mu.Lock() if ac.transport != currentTr { ac.mu.Unlock() @@ -1641,16 +1565,7 @@ func (ac *addrConn) tearDown(err error) { ac.updateConnectivityState(connectivity.Shutdown, nil) ac.cancel() ac.curAddr = resolver.Address{} - if err == errConnDrain && curTr != nil { - // GracefulClose(...) may be executed multiple times when - // i) receiving multiple GoAway frames from the server; or - // ii) there are concurrent name resolver/Balancer triggered - // address removal and GoAway. - // We have to unlock and re-lock here because GracefulClose => Close => onClose, which requires locking ac.mu. - ac.mu.Unlock() - curTr.GracefulClose() - ac.mu.Lock() - } + channelz.AddTraceEvent(logger, ac.channelzID, 0, &channelz.TraceEventDesc{ Desc: "Subchannel deleted", Severity: channelz.CtInfo, @@ -1664,6 +1579,29 @@ func (ac *addrConn) tearDown(err error) { // being deleted right away. channelz.RemoveEntry(ac.channelzID) ac.mu.Unlock() + + // We have to release the lock before the call to GracefulClose/Close here + // because both of them call onClose(), which requires locking ac.mu. + if curTr != nil { + if err == errConnDrain { + // Close the transport gracefully when the subConn is being shutdown. 
+ // + // GracefulClose() may be executed multiple times if: + // - multiple GoAway frames are received from the server + // - there are concurrent name resolver or balancer triggered + // address removal and GoAway + curTr.GracefulClose() + } else { + // Hard close the transport when the channel is entering idle or is + // being shutdown. In the case where the channel is being shutdown, + // closing of transports is also taken care of by cancelation of cc.ctx. + // But in the case where the channel is entering idle, we need to + // explicitly close the transports here. Instead of distinguishing + // between these two cases, it is simpler to close the transport + // unconditionally here. + curTr.Close(err) + } + } } func (ac *addrConn) getState() connectivity.State { @@ -1790,7 +1728,7 @@ func (cc *ClientConn) parseTargetAndFindResolver() error { if err != nil { channelz.Infof(logger, cc.channelzID, "dial target %q parse failed: %v", cc.target, err) } else { - channelz.Infof(logger, cc.channelzID, "parsed dial target is: %+v", parsedTarget) + channelz.Infof(logger, cc.channelzID, "parsed dial target is: %#v", parsedTarget) rb = cc.getResolver(parsedTarget.URL.Scheme) if rb != nil { cc.parsedTarget = parsedTarget @@ -1948,32 +1886,3 @@ func (cc *ClientConn) determineAuthority() error { channelz.Infof(logger, cc.channelzID, "Channel authority set to %q", cc.authority) return nil } - -// initResolverWrapper creates a ccResolverWrapper, which builds the name -// resolver. This method grabs the lock to assign the newly built resolver -// wrapper to the cc.resolverWrapper field. 
-func (cc *ClientConn) initResolverWrapper(creds credentials.TransportCredentials) error { - rw, err := newCCResolverWrapper(cc, ccResolverWrapperOpts{ - target: cc.parsedTarget, - builder: cc.resolverBuilder, - bOpts: resolver.BuildOptions{ - DisableServiceConfig: cc.dopts.disableServiceConfig, - DialCreds: creds, - CredsBundle: cc.dopts.copts.CredsBundle, - Dialer: cc.dopts.copts.Dialer, - }, - channelzID: cc.channelzID, - }) - if err != nil { - return fmt.Errorf("failed to build resolver: %v", err) - } - // Resolver implementations may report state update or error inline when - // built (or right after), and this is handled in cc.updateResolverState. - // Also, an error from the resolver might lead to a re-resolution request - // from the balancer, which is handled in resolveNow() where - // `cc.resolverWrapper` is accessed. Hence, we need to hold the lock here. - cc.mu.Lock() - cc.resolverWrapper = rw - cc.mu.Unlock() - return nil -} diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/codec.go b/.ci/providerlint/vendor/google.golang.org/grpc/codec.go index 12977654781..411e3dfd47c 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/codec.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/codec.go @@ -27,8 +27,8 @@ import ( // omits the name/string, which vary between the two and are not needed for // anything besides the registry in the encoding package. type baseCodec interface { - Marshal(v interface{}) ([]byte, error) - Unmarshal(data []byte, v interface{}) error + Marshal(v any) ([]byte, error) + Unmarshal(data []byte, v any) error } var _ baseCodec = Codec(nil) @@ -41,9 +41,9 @@ var _ baseCodec = encoding.Codec(nil) // Deprecated: use encoding.Codec instead. type Codec interface { // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) + Marshal(v any) ([]byte, error) // Unmarshal parses the wire format into v. 
- Unmarshal(data []byte, v interface{}) error + Unmarshal(data []byte, v any) error // String returns the name of the Codec implementation. This is unused by // gRPC. String() string diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/codes/codes.go b/.ci/providerlint/vendor/google.golang.org/grpc/codes/codes.go index 11b106182db..08476ad1fe1 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/codes/codes.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/codes/codes.go @@ -25,7 +25,13 @@ import ( "strconv" ) -// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +// A Code is a status code defined according to the [gRPC documentation]. +// +// Only the codes defined as consts in this package are valid codes. Do not use +// other code values. Behavior of other codes is implementation-specific and +// interoperability between implementations is not guaranteed. +// +// [gRPC documentation]: https://github.com/grpc/grpc/blob/master/doc/statuscodes.md type Code uint32 const ( diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/credentials/tls.go b/.ci/providerlint/vendor/google.golang.org/grpc/credentials/tls.go index 877b7cd21af..5dafd34edf9 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/credentials/tls.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/credentials/tls.go @@ -44,10 +44,25 @@ func (t TLSInfo) AuthType() string { return "tls" } +// cipherSuiteLookup returns the string version of a TLS cipher suite ID. +func cipherSuiteLookup(cipherSuiteID uint16) string { + for _, s := range tls.CipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + for _, s := range tls.InsecureCipherSuites() { + if s.ID == cipherSuiteID { + return s.Name + } + } + return fmt.Sprintf("unknown ID: %v", cipherSuiteID) +} + // GetSecurityValue returns security info requested by channelz. 
func (t TLSInfo) GetSecurityValue() ChannelzSecurityValue { v := &TLSChannelzSecurityValue{ - StandardName: cipherSuiteLookup[t.State.CipherSuite], + StandardName: cipherSuiteLookup(t.State.CipherSuite), } // Currently there's no way to get LocalCertificate info from tls package. if len(t.State.PeerCertificates) > 0 { @@ -138,10 +153,39 @@ func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { return nil } +// The following cipher suites are forbidden for use with HTTP/2 by +// https://datatracker.ietf.org/doc/html/rfc7540#appendix-A +var tls12ForbiddenCipherSuites = map[uint16]struct{}{ + tls.TLS_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_RSA_WITH_AES_128_GCM_SHA256: {}, + tls.TLS_RSA_WITH_AES_256_GCM_SHA384: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: {}, + tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: {}, +} + // NewTLS uses c to construct a TransportCredentials based on TLS. 
func NewTLS(c *tls.Config) TransportCredentials { tc := &tlsCreds{credinternal.CloneTLSConfig(c)} tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos) + // If the user did not configure a MinVersion and did not configure a + // MaxVersion < 1.2, use MinVersion=1.2, which is required by + // https://datatracker.ietf.org/doc/html/rfc7540#section-9.2 + if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) { + tc.config.MinVersion = tls.VersionTLS12 + } + // If the user did not configure CipherSuites, use all "secure" cipher + // suites reported by the TLS package, but remove some explicitly forbidden + // by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A + if tc.config.CipherSuites == nil { + for _, cs := range tls.CipherSuites() { + if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok { + tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID) + } + } + } return tc } @@ -205,32 +249,3 @@ type TLSChannelzSecurityValue struct { LocalCertificate []byte RemoteCertificate []byte } - -var cipherSuiteLookup = map[uint16]string{ - tls.TLS_RSA_WITH_RC4_128_SHA: "TLS_RSA_WITH_RC4_128_SHA", - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_CBC_SHA: "TLS_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_RSA_WITH_AES_256_CBC_SHA: "TLS_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_RSA_WITH_AES_128_GCM_SHA256: "TLS_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_RSA_WITH_AES_256_GCM_SHA384: "TLS_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA: "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA: "TLS_ECDHE_RSA_WITH_RC4_128_SHA", - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA: "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: 
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384: "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - tls.TLS_FALLBACK_SCSV: "TLS_FALLBACK_SCSV", - tls.TLS_RSA_WITH_AES_128_CBC_SHA256: "TLS_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256: "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", - tls.TLS_AES_128_GCM_SHA256: "TLS_AES_128_GCM_SHA256", - tls.TLS_AES_256_GCM_SHA384: "TLS_AES_256_GCM_SHA384", - tls.TLS_CHACHA20_POLY1305_SHA256: "TLS_CHACHA20_POLY1305_SHA256", -} diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/dialoptions.go b/.ci/providerlint/vendor/google.golang.org/grpc/dialoptions.go index 23ea95237ea..ba242618040 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/dialoptions.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/dialoptions.go @@ -46,6 +46,7 @@ func init() { internal.WithBinaryLogger = withBinaryLogger internal.JoinDialOptions = newJoinDialOption internal.DisableGlobalDialOptions = newDisableGlobalDialOptions + internal.WithRecvBufferPool = withRecvBufferPool } // dialOptions configure a Dial call. 
dialOptions are set by the DialOption @@ -63,7 +64,6 @@ type dialOptions struct { block bool returnLastError bool timeout time.Duration - scChan <-chan ServiceConfig authority string binaryLogger binarylog.Logger copts transport.ConnectOptions @@ -139,6 +139,20 @@ func newJoinDialOption(opts ...DialOption) DialOption { return &joinDialOption{opts: opts} } +// WithSharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func WithSharedWriteBuffer(val bool) DialOption { + return newFuncDialOption(func(o *dialOptions) { + o.copts.SharedWriteBuffer = val + }) +} + // WithWriteBufferSize determines how much data can be batched before doing a // write on the wire. The corresponding memory allocation for this buffer will // be twice the size to keep syscalls low. The default value for this buffer is @@ -236,19 +250,6 @@ func WithDecompressor(dc Decompressor) DialOption { }) } -// WithServiceConfig returns a DialOption which has a channel to read the -// service configuration. -// -// Deprecated: service config should be received through name resolver or via -// WithDefaultServiceConfig, as specified at -// https://github.com/grpc/grpc/blob/master/doc/service_config.md. Will be -// removed in a future 1.x release. -func WithServiceConfig(c <-chan ServiceConfig) DialOption { - return newFuncDialOption(func(o *dialOptions) { - o.scChan = c - }) -} - // WithConnectParams configures the ClientConn to use the provided ConnectParams // for creating and maintaining connections to servers. // @@ -399,6 +400,17 @@ func WithTimeout(d time.Duration) DialOption { // connections. 
If FailOnNonTempDialError() is set to true, and an error is // returned by f, gRPC checks the error's Temporary() method to decide if it // should try to reconnect to the network address. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, use a net.Dialer that sets +// the KeepAlive field to a negative value, and sets the SO_KEEPALIVE socket +// option to true from the Control field. For a concrete example of how to do +// this, see internal.NetDialerWithTCPKeepalive(). +// +// For more information, please see [issue 23459] in the Go github repo. +// +// [issue 23459]: https://github.com/golang/go/issues/23459 func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { return newFuncDialOption(func(o *dialOptions) { o.copts.Dialer = f @@ -473,7 +485,7 @@ func FailOnNonTempDialError(f bool) DialOption { // the RPCs. func WithUserAgent(s string) DialOption { return newFuncDialOption(func(o *dialOptions) { - o.copts.UserAgent = s + o.copts.UserAgent = s + " " + grpcUA }) } @@ -623,13 +635,16 @@ func withHealthCheckFunc(f internal.HealthChecker) DialOption { func defaultDialOptions() dialOptions { return dialOptions{ - healthCheckFunc: internal.HealthCheckFunc, copts: transport.ConnectOptions{ - WriteBufferSize: defaultWriteBufSize, ReadBufferSize: defaultReadBufSize, + WriteBufferSize: defaultWriteBufSize, UseProxy: true, + UserAgent: grpcUA, }, - recvBufferPool: nopBufferPool{}, + bs: internalbackoff.DefaultExponential, + healthCheckFunc: internal.HealthCheckFunc, + idleTimeout: 30 * time.Minute, + recvBufferPool: nopBufferPool{}, } } @@ -666,8 +681,8 @@ func WithResolvers(rs ...resolver.Builder) DialOption { // channel will exit idle mode when the Connect() method is called or when an // RPC is initiated. 
// -// By default this feature is disabled, which can also be explicitly configured -// by passing zero to this function. +// A default timeout of 30 minutes will be used if this dial option is not set +// at dial time and idleness can be disabled by passing a timeout of zero. // // # Experimental // @@ -690,11 +705,13 @@ func WithIdleTimeout(d time.Duration) DialOption { // options are used: WithStatsHandler, EnableTracing, or binary logging. In such // cases, the shared buffer pool will be ignored. // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption { + return withRecvBufferPool(bufferPool) +} + +func withRecvBufferPool(bufferPool SharedBufferPool) DialOption { return newFuncDialOption(func(o *dialOptions) { o.recvBufferPool = bufferPool }) diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/encoding/encoding.go b/.ci/providerlint/vendor/google.golang.org/grpc/encoding/encoding.go index 07a5861352a..5ebf88d7147 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/encoding/encoding.go @@ -38,6 +38,10 @@ const Identity = "identity" // Compressor is used for compressing and decompressing when sending or // receiving messages. +// +// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`, +// gRPC will invoke it to determine the size of the buffer allocated for the +// result of decompression. A return value of -1 indicates unknown size. type Compressor interface { // Compress writes the data written to wc to w after compressing it. If an // error occurs while initializing the compressor, that error is returned @@ -51,15 +55,6 @@ type Compressor interface { // coding header. 
The result must be static; the result cannot change // between calls. Name() string - // If a Compressor implements - // DecompressedSize(compressedBytes []byte) int, gRPC will call it - // to determine the size of the buffer allocated for the result of decompression. - // Return -1 to indicate unknown size. - // - // Experimental - // - // Notice: This API is EXPERIMENTAL and may be changed or removed in a - // later release. } var registeredCompressor = make(map[string]Compressor) @@ -90,9 +85,9 @@ func GetCompressor(name string) Compressor { // methods can be called from concurrent goroutines. type Codec interface { // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) + Marshal(v any) ([]byte, error) // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error + Unmarshal(data []byte, v any) error // Name returns the name of the Codec implementation. The returned string // will be used as part of content type in transmission. The result must be // static; the result cannot change between calls. diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/encoding/proto/proto.go b/.ci/providerlint/vendor/google.golang.org/grpc/encoding/proto/proto.go index 3009b35afe7..0ee3d3bae97 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/encoding/proto/proto.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -37,7 +37,7 @@ func init() { // codec is a Codec implementation with protobuf. It is the default codec for gRPC. 
type codec struct{} -func (codec) Marshal(v interface{}) ([]byte, error) { +func (codec) Marshal(v any) ([]byte, error) { vv, ok := v.(proto.Message) if !ok { return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v) @@ -45,7 +45,7 @@ func (codec) Marshal(v interface{}) ([]byte, error) { return proto.Marshal(vv) } -func (codec) Unmarshal(data []byte, v interface{}) error { +func (codec) Unmarshal(data []byte, v any) error { vv, ok := v.(proto.Message) if !ok { return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v) diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/grpclog/component.go b/.ci/providerlint/vendor/google.golang.org/grpc/grpclog/component.go index 8358dd6e2ab..ac73c9ced25 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/grpclog/component.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/grpclog/component.go @@ -31,71 +31,71 @@ type componentData struct { var cache = map[string]*componentData{} -func (c *componentData) InfoDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) InfoDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.InfoDepth(depth+1, args...) } -func (c *componentData) WarningDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) WarningDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.WarningDepth(depth+1, args...) } -func (c *componentData) ErrorDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) ErrorDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.ErrorDepth(depth+1, args...) 
} -func (c *componentData) FatalDepth(depth int, args ...interface{}) { - args = append([]interface{}{"[" + string(c.name) + "]"}, args...) +func (c *componentData) FatalDepth(depth int, args ...any) { + args = append([]any{"[" + string(c.name) + "]"}, args...) grpclog.FatalDepth(depth+1, args...) } -func (c *componentData) Info(args ...interface{}) { +func (c *componentData) Info(args ...any) { c.InfoDepth(1, args...) } -func (c *componentData) Warning(args ...interface{}) { +func (c *componentData) Warning(args ...any) { c.WarningDepth(1, args...) } -func (c *componentData) Error(args ...interface{}) { +func (c *componentData) Error(args ...any) { c.ErrorDepth(1, args...) } -func (c *componentData) Fatal(args ...interface{}) { +func (c *componentData) Fatal(args ...any) { c.FatalDepth(1, args...) } -func (c *componentData) Infof(format string, args ...interface{}) { +func (c *componentData) Infof(format string, args ...any) { c.InfoDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Warningf(format string, args ...interface{}) { +func (c *componentData) Warningf(format string, args ...any) { c.WarningDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Errorf(format string, args ...interface{}) { +func (c *componentData) Errorf(format string, args ...any) { c.ErrorDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Fatalf(format string, args ...interface{}) { +func (c *componentData) Fatalf(format string, args ...any) { c.FatalDepth(1, fmt.Sprintf(format, args...)) } -func (c *componentData) Infoln(args ...interface{}) { +func (c *componentData) Infoln(args ...any) { c.InfoDepth(1, args...) } -func (c *componentData) Warningln(args ...interface{}) { +func (c *componentData) Warningln(args ...any) { c.WarningDepth(1, args...) } -func (c *componentData) Errorln(args ...interface{}) { +func (c *componentData) Errorln(args ...any) { c.ErrorDepth(1, args...) 
} -func (c *componentData) Fatalln(args ...interface{}) { +func (c *componentData) Fatalln(args ...any) { c.FatalDepth(1, args...) } diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/grpclog/grpclog.go b/.ci/providerlint/vendor/google.golang.org/grpc/grpclog/grpclog.go index c8bb2be34bf..16928c9cb99 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/grpclog/grpclog.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -42,53 +42,53 @@ func V(l int) bool { } // Info logs to the INFO log. -func Info(args ...interface{}) { +func Info(args ...any) { grpclog.Logger.Info(args...) } // Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. -func Infof(format string, args ...interface{}) { +func Infof(format string, args ...any) { grpclog.Logger.Infof(format, args...) } // Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. -func Infoln(args ...interface{}) { +func Infoln(args ...any) { grpclog.Logger.Infoln(args...) } // Warning logs to the WARNING log. -func Warning(args ...interface{}) { +func Warning(args ...any) { grpclog.Logger.Warning(args...) } // Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. -func Warningf(format string, args ...interface{}) { +func Warningf(format string, args ...any) { grpclog.Logger.Warningf(format, args...) } // Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. -func Warningln(args ...interface{}) { +func Warningln(args ...any) { grpclog.Logger.Warningln(args...) } // Error logs to the ERROR log. -func Error(args ...interface{}) { +func Error(args ...any) { grpclog.Logger.Error(args...) } // Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. -func Errorf(format string, args ...interface{}) { +func Errorf(format string, args ...any) { grpclog.Logger.Errorf(format, args...) } // Errorln logs to the ERROR log. 
Arguments are handled in the manner of fmt.Println. -func Errorln(args ...interface{}) { +func Errorln(args ...any) { grpclog.Logger.Errorln(args...) } // Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. // It calls os.Exit() with exit code 1. -func Fatal(args ...interface{}) { +func Fatal(args ...any) { grpclog.Logger.Fatal(args...) // Make sure fatal logs will exit. os.Exit(1) @@ -96,7 +96,7 @@ func Fatal(args ...interface{}) { // Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. // It calls os.Exit() with exit code 1. -func Fatalf(format string, args ...interface{}) { +func Fatalf(format string, args ...any) { grpclog.Logger.Fatalf(format, args...) // Make sure fatal logs will exit. os.Exit(1) @@ -104,7 +104,7 @@ func Fatalf(format string, args ...interface{}) { // Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. // It calle os.Exit()) with exit code 1. -func Fatalln(args ...interface{}) { +func Fatalln(args ...any) { grpclog.Logger.Fatalln(args...) // Make sure fatal logs will exit. os.Exit(1) @@ -113,20 +113,20 @@ func Fatalln(args ...interface{}) { // Print prints to the logger. Arguments are handled in the manner of fmt.Print. // // Deprecated: use Info. -func Print(args ...interface{}) { +func Print(args ...any) { grpclog.Logger.Info(args...) } // Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. // // Deprecated: use Infof. -func Printf(format string, args ...interface{}) { +func Printf(format string, args ...any) { grpclog.Logger.Infof(format, args...) } // Println prints to the logger. Arguments are handled in the manner of fmt.Println. // // Deprecated: use Infoln. -func Println(args ...interface{}) { +func Println(args ...any) { grpclog.Logger.Infoln(args...) 
} diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/grpclog/logger.go b/.ci/providerlint/vendor/google.golang.org/grpc/grpclog/logger.go index ef06a4822b7..b1674d8267c 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/grpclog/logger.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/grpclog/logger.go @@ -24,12 +24,12 @@ import "google.golang.org/grpc/internal/grpclog" // // Deprecated: use LoggerV2. type Logger interface { - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Fatalln(args ...interface{}) - Print(args ...interface{}) - Printf(format string, args ...interface{}) - Println(args ...interface{}) + Fatal(args ...any) + Fatalf(format string, args ...any) + Fatalln(args ...any) + Print(args ...any) + Printf(format string, args ...any) + Println(args ...any) } // SetLogger sets the logger that is used in grpc. Call only from @@ -45,39 +45,39 @@ type loggerWrapper struct { Logger } -func (g *loggerWrapper) Info(args ...interface{}) { +func (g *loggerWrapper) Info(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Infoln(args ...interface{}) { +func (g *loggerWrapper) Infoln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Infof(format string, args ...interface{}) { +func (g *loggerWrapper) Infof(format string, args ...any) { g.Logger.Printf(format, args...) } -func (g *loggerWrapper) Warning(args ...interface{}) { +func (g *loggerWrapper) Warning(args ...any) { g.Logger.Print(args...) } -func (g *loggerWrapper) Warningln(args ...interface{}) { +func (g *loggerWrapper) Warningln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Warningf(format string, args ...interface{}) { +func (g *loggerWrapper) Warningf(format string, args ...any) { g.Logger.Printf(format, args...) } -func (g *loggerWrapper) Error(args ...interface{}) { +func (g *loggerWrapper) Error(args ...any) { g.Logger.Print(args...) 
} -func (g *loggerWrapper) Errorln(args ...interface{}) { +func (g *loggerWrapper) Errorln(args ...any) { g.Logger.Println(args...) } -func (g *loggerWrapper) Errorf(format string, args ...interface{}) { +func (g *loggerWrapper) Errorf(format string, args ...any) { g.Logger.Printf(format, args...) } diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/.ci/providerlint/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 5de66e40d36..ecfd36d7130 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -33,35 +33,35 @@ import ( // LoggerV2 does underlying logging work for grpclog. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) + Info(args ...any) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) + Infoln(args ...any) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) + Infof(format string, args ...any) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...interface{}) + Warning(args ...any) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) + Warningln(args ...any) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. - Warningf(format string, args ...interface{}) + Warningf(format string, args ...any) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) + Error(args ...any) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) + Errorln(args ...any) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. 
- Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) + Fatal(args ...any) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) + Fatalln(args ...any) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) // V reports whether verbosity level l is at least the requested verbose level. V(l int) bool } @@ -182,53 +182,53 @@ func (g *loggerT) output(severity int, s string) { g.m[severity].Output(2, string(b)) } -func (g *loggerT) Info(args ...interface{}) { +func (g *loggerT) Info(args ...any) { g.output(infoLog, fmt.Sprint(args...)) } -func (g *loggerT) Infoln(args ...interface{}) { +func (g *loggerT) Infoln(args ...any) { g.output(infoLog, fmt.Sprintln(args...)) } -func (g *loggerT) Infof(format string, args ...interface{}) { +func (g *loggerT) Infof(format string, args ...any) { g.output(infoLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Warning(args ...interface{}) { +func (g *loggerT) Warning(args ...any) { g.output(warningLog, fmt.Sprint(args...)) } -func (g *loggerT) Warningln(args ...interface{}) { +func (g *loggerT) Warningln(args ...any) { g.output(warningLog, fmt.Sprintln(args...)) } -func (g *loggerT) Warningf(format string, args ...interface{}) { +func (g *loggerT) Warningf(format string, args ...any) { g.output(warningLog, fmt.Sprintf(format, args...)) } 
-func (g *loggerT) Error(args ...interface{}) { +func (g *loggerT) Error(args ...any) { g.output(errorLog, fmt.Sprint(args...)) } -func (g *loggerT) Errorln(args ...interface{}) { +func (g *loggerT) Errorln(args ...any) { g.output(errorLog, fmt.Sprintln(args...)) } -func (g *loggerT) Errorf(format string, args ...interface{}) { +func (g *loggerT) Errorf(format string, args ...any) { g.output(errorLog, fmt.Sprintf(format, args...)) } -func (g *loggerT) Fatal(args ...interface{}) { +func (g *loggerT) Fatal(args ...any) { g.output(fatalLog, fmt.Sprint(args...)) os.Exit(1) } -func (g *loggerT) Fatalln(args ...interface{}) { +func (g *loggerT) Fatalln(args ...any) { g.output(fatalLog, fmt.Sprintln(args...)) os.Exit(1) } -func (g *loggerT) Fatalf(format string, args ...interface{}) { +func (g *loggerT) Fatalf(format string, args ...any) { g.output(fatalLog, fmt.Sprintf(format, args...)) os.Exit(1) } @@ -248,11 +248,11 @@ func (g *loggerT) V(l int) bool { type DepthLoggerV2 interface { LoggerV2 // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...interface{}) + InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...interface{}) + WarningDepth(depth int, args ...any) // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...interface{}) + ErrorDepth(depth int, args ...any) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. 
- FatalDepth(depth int, args ...interface{}) + FatalDepth(depth int, args ...any) } diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/health/client.go b/.ci/providerlint/vendor/google.golang.org/grpc/health/client.go index b5bee483802..740745c45f6 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/health/client.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/health/client.go @@ -56,7 +56,7 @@ const healthCheckMethod = "/grpc.health.v1.Health/Watch" // This function implements the protocol defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md -func clientHealthCheck(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), service string) error { +func clientHealthCheck(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), service string) error { tryCnt := 0 retryConnection: diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/.ci/providerlint/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go index 142d35f753e..24299efd63f 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/health/v1/health.proto diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/.ci/providerlint/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index a01a1b4d54b..4439cda0f3c 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -44,8 +44,15 @@ const ( // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type HealthClient interface { - // If the requested service is unknown, the call will fail with status - // NOT_FOUND. + // Check gets the health of the specified service. If the requested service + // is unknown, the call will fail with status NOT_FOUND. If the caller does + // not specify a service name, the server should respond with its overall + // health status. + // + // Clients should set a deadline when calling Check, and can declare the + // server unhealthy if they do not receive a timely response. + // + // Check implementations should be idempotent and side effect free. Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current @@ -118,8 +125,15 @@ func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { // All implementations should embed UnimplementedHealthServer // for forward compatibility type HealthServer interface { - // If the requested service is unknown, the call will fail with status - // NOT_FOUND. + // Check gets the health of the specified service. If the requested service + // is unknown, the call will fail with status NOT_FOUND. 
If the caller does + // not specify a service name, the server should respond with its overall + // health status. + // + // Clients should set a deadline when calling Check, and can declare the + // server unhealthy if they do not receive a timely response. + // + // Check implementations should be idempotent and side effect free. Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/idle.go b/.ci/providerlint/vendor/google.golang.org/grpc/idle.go deleted file mode 100644 index dc3dc72f6b0..00000000000 --- a/.ci/providerlint/vendor/google.golang.org/grpc/idle.go +++ /dev/null @@ -1,287 +0,0 @@ -/* - * - * Copyright 2023 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "fmt" - "math" - "sync" - "sync/atomic" - "time" -) - -// For overriding in unit tests. -var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { - return time.AfterFunc(d, f) -} - -// idlenessEnforcer is the functionality provided by grpc.ClientConn to enter -// and exit from idle mode. -type idlenessEnforcer interface { - exitIdleMode() error - enterIdleMode() error -} - -// idlenessManager defines the functionality required to track RPC activity on a -// channel. 
-type idlenessManager interface { - onCallBegin() error - onCallEnd() - close() -} - -type noopIdlenessManager struct{} - -func (noopIdlenessManager) onCallBegin() error { return nil } -func (noopIdlenessManager) onCallEnd() {} -func (noopIdlenessManager) close() {} - -// idlenessManagerImpl implements the idlenessManager interface. It uses atomic -// operations to synchronize access to shared state and a mutex to guarantee -// mutual exclusion in a critical section. -type idlenessManagerImpl struct { - // State accessed atomically. - lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. - activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. - activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. - closed int32 // Boolean; True when the manager is closed. - - // Can be accessed without atomics or mutex since these are set at creation - // time and read-only after that. - enforcer idlenessEnforcer // Functionality provided by grpc.ClientConn. - timeout int64 // Idle timeout duration nanos stored as an int64. - - // idleMu is used to guarantee mutual exclusion in two scenarios: - // - Opposing intentions: - // - a: Idle timeout has fired and handleIdleTimeout() is trying to put - // the channel in idle mode because the channel has been inactive. - // - b: At the same time an RPC is made on the channel, and onCallBegin() - // is trying to prevent the channel from going idle. - // - Competing intentions: - // - The channel is in idle mode and there are multiple RPCs starting at - // the same time, all trying to move the channel out of idle. Only one - // of them should succeed in doing so, while the other RPCs should - // piggyback on the first one and be successfully handled. 
- idleMu sync.RWMutex - actuallyIdle bool - timer *time.Timer -} - -// newIdlenessManager creates a new idleness manager implementation for the -// given idle timeout. -func newIdlenessManager(enforcer idlenessEnforcer, idleTimeout time.Duration) idlenessManager { - if idleTimeout == 0 { - return noopIdlenessManager{} - } - - i := &idlenessManagerImpl{ - enforcer: enforcer, - timeout: int64(idleTimeout), - } - i.timer = timeAfterFunc(idleTimeout, i.handleIdleTimeout) - return i -} - -// resetIdleTimer resets the idle timer to the given duration. This method -// should only be called from the timer callback. -func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) { - i.idleMu.Lock() - defer i.idleMu.Unlock() - - if i.timer == nil { - // Only close sets timer to nil. We are done. - return - } - - // It is safe to ignore the return value from Reset() because this method is - // only ever called from the timer callback, which means the timer has - // already fired. - i.timer.Reset(d) -} - -// handleIdleTimeout is the timer callback that is invoked upon expiry of the -// configured idle timeout. The channel is considered inactive if there are no -// ongoing calls and no RPC activity since the last time the timer fired. -func (i *idlenessManagerImpl) handleIdleTimeout() { - if i.isClosed() { - return - } - - if atomic.LoadInt32(&i.activeCallsCount) > 0 { - i.resetIdleTimer(time.Duration(i.timeout)) - return - } - - // There has been activity on the channel since we last got here. Reset the - // timer and return. - if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { - // Set the timer to fire after a duration of idle timeout, calculated - // from the time the most recent RPC completed. 
- atomic.StoreInt32(&i.activeSinceLastTimerCheck, 0) - i.resetIdleTimer(time.Duration(atomic.LoadInt64(&i.lastCallEndTime) + i.timeout - time.Now().UnixNano())) - return - } - - // This CAS operation is extremely likely to succeed given that there has - // been no activity since the last time we were here. Setting the - // activeCallsCount to -math.MaxInt32 indicates to onCallBegin() that the - // channel is either in idle mode or is trying to get there. - if !atomic.CompareAndSwapInt32(&i.activeCallsCount, 0, -math.MaxInt32) { - // This CAS operation can fail if an RPC started after we checked for - // activity at the top of this method, or one was ongoing from before - // the last time we were here. In both case, reset the timer and return. - i.resetIdleTimer(time.Duration(i.timeout)) - return - } - - // Now that we've set the active calls count to -math.MaxInt32, it's time to - // actually move to idle mode. - if i.tryEnterIdleMode() { - // Successfully entered idle mode. No timer needed until we exit idle. - return - } - - // Failed to enter idle mode due to a concurrent RPC that kept the channel - // active, or because of an error from the channel. Undo the attempt to - // enter idle, and reset the timer to try again later. - atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) - i.resetIdleTimer(time.Duration(i.timeout)) -} - -// tryEnterIdleMode instructs the channel to enter idle mode. But before -// that, it performs a last minute check to ensure that no new RPC has come in, -// making the channel active. -// -// Return value indicates whether or not the channel moved to idle mode. -// -// Holds idleMu which ensures mutual exclusion with exitIdleMode. -func (i *idlenessManagerImpl) tryEnterIdleMode() bool { - i.idleMu.Lock() - defer i.idleMu.Unlock() - - if atomic.LoadInt32(&i.activeCallsCount) != -math.MaxInt32 { - // We raced and lost to a new RPC. Very rare, but stop entering idle. 
- return false - } - if atomic.LoadInt32(&i.activeSinceLastTimerCheck) == 1 { - // An very short RPC could have come in (and also finished) after we - // checked for calls count and activity in handleIdleTimeout(), but - // before the CAS operation. So, we need to check for activity again. - return false - } - - // No new RPCs have come in since we last set the active calls count value - // -math.MaxInt32 in the timer callback. And since we have the lock, it is - // safe to enter idle mode now. - if err := i.enforcer.enterIdleMode(); err != nil { - logger.Errorf("Failed to enter idle mode: %v", err) - return false - } - - // Successfully entered idle mode. - i.actuallyIdle = true - return true -} - -// onCallBegin is invoked at the start of every RPC. -func (i *idlenessManagerImpl) onCallBegin() error { - if i.isClosed() { - return nil - } - - if atomic.AddInt32(&i.activeCallsCount, 1) > 0 { - // Channel is not idle now. Set the activity bit and allow the call. - atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) - return nil - } - - // Channel is either in idle mode or is in the process of moving to idle - // mode. Attempt to exit idle mode to allow this RPC. - if err := i.exitIdleMode(); err != nil { - // Undo the increment to calls count, and return an error causing the - // RPC to fail. - atomic.AddInt32(&i.activeCallsCount, -1) - return err - } - - atomic.StoreInt32(&i.activeSinceLastTimerCheck, 1) - return nil -} - -// exitIdleMode instructs the channel to exit idle mode. -// -// Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. -func (i *idlenessManagerImpl) exitIdleMode() error { - i.idleMu.Lock() - defer i.idleMu.Unlock() - - if !i.actuallyIdle { - // This can happen in two scenarios: - // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called - // tryEnterIdleMode(). But before the latter could grab the lock, an RPC - // came in and onCallBegin() noticed that the calls count is negative. 
- // - Channel is in idle mode, and multiple new RPCs come in at the same - // time, all of them notice a negative calls count in onCallBegin and get - // here. The first one to get the lock would got the channel to exit idle. - // - // Either way, nothing to do here. - return nil - } - - if err := i.enforcer.exitIdleMode(); err != nil { - return fmt.Errorf("channel failed to exit idle mode: %v", err) - } - - // Undo the idle entry process. This also respects any new RPC attempts. - atomic.AddInt32(&i.activeCallsCount, math.MaxInt32) - i.actuallyIdle = false - - // Start a new timer to fire after the configured idle timeout. - i.timer = timeAfterFunc(time.Duration(i.timeout), i.handleIdleTimeout) - return nil -} - -// onCallEnd is invoked at the end of every RPC. -func (i *idlenessManagerImpl) onCallEnd() { - if i.isClosed() { - return - } - - // Record the time at which the most recent call finished. - atomic.StoreInt64(&i.lastCallEndTime, time.Now().UnixNano()) - - // Decrement the active calls count. This count can temporarily go negative - // when the timer callback is in the process of moving the channel to idle - // mode, but one or more RPCs come in and complete before the timer callback - // can get done with the process of moving to idle mode. - atomic.AddInt32(&i.activeCallsCount, -1) -} - -func (i *idlenessManagerImpl) isClosed() bool { - return atomic.LoadInt32(&i.closed) == 1 -} - -func (i *idlenessManagerImpl) close() { - atomic.StoreInt32(&i.closed, 1) - - i.idleMu.Lock() - i.timer.Stop() - i.timer = nil - i.idleMu.Unlock() -} diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/interceptor.go b/.ci/providerlint/vendor/google.golang.org/grpc/interceptor.go index bb96ef57be8..877d78fc3d0 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/interceptor.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/interceptor.go @@ -23,7 +23,7 @@ import ( ) // UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. 
-type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error +type UnaryInvoker func(ctx context.Context, method string, req, reply any, cc *ClientConn, opts ...CallOption) error // UnaryClientInterceptor intercepts the execution of a unary RPC on the client. // Unary interceptors can be specified as a DialOption, using @@ -40,7 +40,7 @@ type UnaryInvoker func(ctx context.Context, method string, req, reply interface{ // defaults from the ClientConn as well as per-call options. // // The returned error must be compatible with the status package. -type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply any, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error // Streamer is called by StreamClientInterceptor to create a ClientStream. type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) @@ -66,7 +66,7 @@ type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *Cli // server side. All per-rpc information may be mutated by the interceptor. type UnaryServerInfo struct { // Server is the service implementation the user provides. This is read-only. - Server interface{} + Server any // FullMethod is the full RPC method string, i.e., /package.service/method. FullMethod string } @@ -78,13 +78,13 @@ type UnaryServerInfo struct { // status package, or be one of the context errors. Otherwise, gRPC will use // codes.Unknown as the status code and err.Error() as the status message of the // RPC. 
-type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) +type UnaryHandler func(ctx context.Context, req any) (any, error) // UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info // contains all the information of this RPC the interceptor can operate on. And handler is the wrapper // of the service method implementation. It is the responsibility of the interceptor to invoke handler // to complete the RPC. -type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) +type UnaryServerInterceptor func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (resp any, err error) // StreamServerInfo consists of various information about a streaming RPC on // server side. All per-rpc information may be mutated by the interceptor. @@ -101,4 +101,4 @@ type StreamServerInfo struct { // info contains all the information of this RPC the interceptor can operate on. And handler is the // service method implementation. It is the responsibility of the interceptor to invoke handler to // complete the RPC. 
-type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error +type StreamServerInterceptor func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/backoff/backoff.go index 5fc0ee3da53..fed1c011a32 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -23,6 +23,8 @@ package backoff import ( + "context" + "errors" "time" grpcbackoff "google.golang.org/grpc/backoff" @@ -71,3 +73,37 @@ func (bc Exponential) Backoff(retries int) time.Duration { } return time.Duration(backoff) } + +// ErrResetBackoff is the error to be returned by the function executed by RunF, +// to instruct the latter to reset its backoff state. +var ErrResetBackoff = errors.New("reset backoff state") + +// RunF provides a convenient way to run a function f repeatedly until the +// context expires or f returns a non-nil error that is not ErrResetBackoff. +// When f returns ErrResetBackoff, RunF continues to run f, but resets its +// backoff state before doing so. backoff accepts an integer representing the +// number of retries, and returns the amount of time to backoff. 
+func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) { + attempt := 0 + timer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-timer.C: + case <-ctx.Done(): + timer.Stop() + return + } + + err := f() + if errors.Is(err, ErrResetBackoff) { + timer.Reset(0) + attempt = 0 + continue + } + if err != nil { + return + } + timer.Reset(backoff(attempt)) + attempt++ + } +} diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go index 08666f62a7c..3c594e6e4e5 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/balancer/gracefulswitch/gracefulswitch.go @@ -200,8 +200,8 @@ func (gsb *Balancer) ExitIdle() { } } -// UpdateSubConnState forwards the update to the appropriate child. -func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { +// updateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) updateSubConnState(sc balancer.SubConn, state balancer.SubConnState, cb func(balancer.SubConnState)) { gsb.currentMu.Lock() defer gsb.currentMu.Unlock() gsb.mu.Lock() @@ -214,13 +214,26 @@ func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubC } else if gsb.balancerPending != nil && gsb.balancerPending.subconns[sc] { balToUpdate = gsb.balancerPending } - gsb.mu.Unlock() if balToUpdate == nil { // SubConn belonged to a stale lb policy that has not yet fully closed, // or the balancer was already closed. 
+ gsb.mu.Unlock() return } - balToUpdate.UpdateSubConnState(sc, state) + if state.ConnectivityState == connectivity.Shutdown { + delete(balToUpdate.subconns, sc) + } + gsb.mu.Unlock() + if cb != nil { + cb(state) + } else { + balToUpdate.UpdateSubConnState(sc, state) + } +} + +// UpdateSubConnState forwards the update to the appropriate child. +func (gsb *Balancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { + gsb.updateSubConnState(sc, state, nil) } // Close closes any active child balancers. @@ -242,7 +255,7 @@ func (gsb *Balancer) Close() { // // It implements the balancer.ClientConn interface and is passed down in that // capacity to the wrapped balancer. It maintains a set of subConns created by -// the wrapped balancer and calls from the latter to create/update/remove +// the wrapped balancer and calls from the latter to create/update/shutdown // SubConns update this set before being forwarded to the parent ClientConn. // State updates from the wrapped balancer can result in invocation of the // graceful switch logic. @@ -254,21 +267,10 @@ type balancerWrapper struct { subconns map[balancer.SubConn]bool // subconns created by this balancer } -func (bw *balancerWrapper) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) { - if state.ConnectivityState == connectivity.Shutdown { - bw.gsb.mu.Lock() - delete(bw.subconns, sc) - bw.gsb.mu.Unlock() - } - // There is no need to protect this read with a mutex, as the write to the - // Balancer field happens in SwitchTo, which completes before this can be - // called. - bw.Balancer.UpdateSubConnState(sc, state) -} - -// Close closes the underlying LB policy and removes the subconns it created. bw -// must not be referenced via balancerCurrent or balancerPending in gsb when -// called. gsb.mu must not be held. Does not panic with a nil receiver. +// Close closes the underlying LB policy and shuts down the subconns it +// created. 
bw must not be referenced via balancerCurrent or balancerPending in +// gsb when called. gsb.mu must not be held. Does not panic with a nil +// receiver. func (bw *balancerWrapper) Close() { // before Close is called. if bw == nil { @@ -281,7 +283,7 @@ func (bw *balancerWrapper) Close() { bw.Balancer.Close() bw.gsb.mu.Lock() for sc := range bw.subconns { - bw.gsb.cc.RemoveSubConn(sc) + sc.Shutdown() } bw.gsb.mu.Unlock() } @@ -335,13 +337,16 @@ func (bw *balancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.Ne } bw.gsb.mu.Unlock() + var sc balancer.SubConn + oldListener := opts.StateListener + opts.StateListener = func(state balancer.SubConnState) { bw.gsb.updateSubConnState(sc, state, oldListener) } sc, err := bw.gsb.cc.NewSubConn(addrs, opts) if err != nil { return nil, err } bw.gsb.mu.Lock() if !bw.gsb.balancerCurrentOrPending(bw) { // balancer was closed during this call - bw.gsb.cc.RemoveSubConn(sc) + sc.Shutdown() bw.gsb.mu.Unlock() return nil, fmt.Errorf("%T at address %p that called NewSubConn is deleted", bw, bw) } @@ -360,13 +365,9 @@ func (bw *balancerWrapper) ResolveNow(opts resolver.ResolveNowOptions) { } func (bw *balancerWrapper) RemoveSubConn(sc balancer.SubConn) { - bw.gsb.mu.Lock() - if !bw.gsb.balancerCurrentOrPending(bw) { - bw.gsb.mu.Unlock() - return - } - bw.gsb.mu.Unlock() - bw.gsb.cc.RemoveSubConn(sc) + // Note: existing third party balancers may call this, so it must remain + // until RemoveSubConn is fully removed. 
+ sc.Shutdown() } func (bw *balancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resolver.Address) { diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/balancerload/load.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/balancerload/load.go index 3a905d96657..94a08d6875a 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/balancerload/load.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/balancerload/load.go @@ -25,7 +25,7 @@ import ( // Parser converts loads from metadata into a concrete type. type Parser interface { // Parse parses loads from metadata. - Parse(md metadata.MD) interface{} + Parse(md metadata.MD) any } var parser Parser @@ -38,7 +38,7 @@ func SetParser(lr Parser) { } // Parse calls parser.Read(). -func Parse(md metadata.MD) interface{} { +func Parse(md metadata.MD) any { if parser == nil { return nil } diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go index 6c3f632215f..0f31274a3cc 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/binarylog/method_logger.go @@ -230,7 +230,7 @@ type ClientMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. - Message interface{} + Message any } func (c *ClientMessage) toProto() *binlogpb.GrpcLogEntry { @@ -270,7 +270,7 @@ type ServerMessage struct { OnClientSide bool // Message can be a proto.Message or []byte. Other messages formats are not // supported. 
- Message interface{} + Message any } func (c *ServerMessage) toProto() *binlogpb.GrpcLogEntry { diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/buffer/unbounded.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/buffer/unbounded.go index 81c2f5fd761..11f91668ac9 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/buffer/unbounded.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/buffer/unbounded.go @@ -18,7 +18,10 @@ // Package buffer provides an implementation of an unbounded buffer. package buffer -import "sync" +import ( + "errors" + "sync" +) // Unbounded is an implementation of an unbounded buffer which does not use // extra goroutines. This is typically used for passing updates from one entity @@ -28,49 +31,50 @@ import "sync" // the underlying mutex used for synchronization. // // Unbounded supports values of any type to be stored in it by using a channel -// of `interface{}`. This means that a call to Put() incurs an extra memory -// allocation, and also that users need a type assertion while reading. For -// performance critical code paths, using Unbounded is strongly discouraged and -// defining a new type specific implementation of this buffer is preferred. See +// of `any`. This means that a call to Put() incurs an extra memory allocation, +// and also that users need a type assertion while reading. For performance +// critical code paths, using Unbounded is strongly discouraged and defining a +// new type specific implementation of this buffer is preferred. See // internal/transport/transport.go for an example of this. type Unbounded struct { - c chan interface{} + c chan any closed bool + closing bool mu sync.Mutex - backlog []interface{} + backlog []any } // NewUnbounded returns a new instance of Unbounded. 
func NewUnbounded() *Unbounded { - return &Unbounded{c: make(chan interface{}, 1)} + return &Unbounded{c: make(chan any, 1)} } +var errBufferClosed = errors.New("Put called on closed buffer.Unbounded") + // Put adds t to the unbounded buffer. -func (b *Unbounded) Put(t interface{}) { +func (b *Unbounded) Put(t any) error { b.mu.Lock() defer b.mu.Unlock() - if b.closed { - return + if b.closing { + return errBufferClosed } if len(b.backlog) == 0 { select { case b.c <- t: - return + return nil default: } } b.backlog = append(b.backlog, t) + return nil } -// Load sends the earliest buffered data, if any, onto the read channel -// returned by Get(). Users are expected to call this every time they read a +// Load sends the earliest buffered data, if any, onto the read channel returned +// by Get(). Users are expected to call this every time they successfully read a // value from the read channel. func (b *Unbounded) Load() { b.mu.Lock() defer b.mu.Unlock() - if b.closed { - return - } if len(b.backlog) > 0 { select { case b.c <- b.backlog[0]: @@ -78,6 +82,8 @@ func (b *Unbounded) Load() { b.backlog = b.backlog[1:] default: } + } else if b.closing && !b.closed { + close(b.c) } } @@ -88,18 +94,23 @@ func (b *Unbounded) Load() { // send the next buffered value onto the channel if there is any. // // If the unbounded buffer is closed, the read channel returned by this method -// is closed. -func (b *Unbounded) Get() <-chan interface{} { +// is closed after all data is drained. +func (b *Unbounded) Get() <-chan any { return b.c } -// Close closes the unbounded buffer. +// Close closes the unbounded buffer. No subsequent data may be Put(), and the +// channel returned from Get() will be closed after all the data is read and +// Load() is called for the final time. 
func (b *Unbounded) Close() { b.mu.Lock() defer b.mu.Unlock() - if b.closed { + if b.closing { return } - b.closed = true - close(b.c) + b.closing = true + if len(b.backlog) == 0 { + b.closed = true + close(b.c) + } } diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/channelz/funcs.go index 777cbcd7921..fc094f3441b 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -24,15 +24,14 @@ package channelz import ( - "context" "errors" - "fmt" "sort" "sync" "sync/atomic" "time" "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" ) const ( @@ -40,8 +39,11 @@ const ( ) var ( - db dbWrapper - idGen idGenerator + // IDGen is the global channelz entity ID generator. It should not be used + // outside this package except by tests. + IDGen IDGenerator + + db dbWrapper // EntryPerPage defines the number of channelz entries to be shown on a web page. EntryPerPage = int64(50) curState int32 @@ -52,14 +54,20 @@ var ( func TurnOn() { if !IsOn() { db.set(newChannelMap()) - idGen.reset() + IDGen.Reset() atomic.StoreInt32(&curState, 1) } } +func init() { + internal.ChannelzTurnOffForTesting = func() { + atomic.StoreInt32(&curState, 0) + } +} + // IsOn returns whether channelz data collection is on. func IsOn() bool { - return atomic.CompareAndSwapInt32(&curState, 1, 1) + return atomic.LoadInt32(&curState) == 1 } // SetMaxTraceEntry sets maximum number of trace entry per entity (i.e. channel/subchannel). @@ -97,43 +105,6 @@ func (d *dbWrapper) get() *channelMap { return d.DB } -// NewChannelzStorageForTesting initializes channelz data storage and id -// generator for testing purposes. 
-// -// Returns a cleanup function to be invoked by the test, which waits for up to -// 10s for all channelz state to be reset by the grpc goroutines when those -// entities get closed. This cleanup function helps with ensuring that tests -// don't mess up each other. -func NewChannelzStorageForTesting() (cleanup func() error) { - db.set(newChannelMap()) - idGen.reset() - - return func() error { - cm := db.get() - if cm == nil { - return nil - } - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - ticker := time.NewTicker(10 * time.Millisecond) - defer ticker.Stop() - for { - cm.mu.RLock() - topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets) - cm.mu.RUnlock() - - if err := ctx.Err(); err != nil { - return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets) - } - if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 { - return nil - } - <-ticker.C - } - } -} - // GetTopChannels returns a slice of top channel's ChannelMetric, along with a // boolean indicating whether there's more top channels to be queried for. // @@ -193,7 +164,7 @@ func GetServer(id int64) *ServerMetric { // // If channelz is not turned ON, the channelz database is not mutated. 
func RegisterChannel(c Channel, pid *Identifier, ref string) *Identifier { - id := idGen.genID() + id := IDGen.genID() var parent int64 isTopChannel := true if pid != nil { @@ -229,7 +200,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er if pid == nil { return nil, errors.New("a SubChannel's parent id cannot be nil") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefSubChannel, id, pid), nil } @@ -251,7 +222,7 @@ func RegisterSubChannel(c Channel, pid *Identifier, ref string) (*Identifier, er // // If channelz is not turned ON, the channelz database is not mutated. func RegisterServer(s Server, ref string) *Identifier { - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefServer, id, nil) } @@ -277,7 +248,7 @@ func RegisterListenSocket(s Socket, pid *Identifier, ref string) (*Identifier, e if pid == nil { return nil, errors.New("a ListenSocket's parent id cannot be 0") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefListenSocket, id, pid), nil } @@ -297,7 +268,7 @@ func RegisterNormalSocket(s Socket, pid *Identifier, ref string) (*Identifier, e if pid == nil { return nil, errors.New("a NormalSocket's parent id cannot be 0") } - id := idGen.genID() + id := IDGen.genID() if !IsOn() { return newIdentifer(RefNormalSocket, id, pid), nil } @@ -776,14 +747,17 @@ func (c *channelMap) GetServer(id int64) *ServerMetric { return sm } -type idGenerator struct { +// IDGenerator is an incrementing atomic that tracks IDs for channelz entities. +type IDGenerator struct { id int64 } -func (i *idGenerator) reset() { +// Reset resets the generated ID back to zero. Should only be used at +// initialization or by tests sensitive to the ID number. 
+func (i *IDGenerator) Reset() { atomic.StoreInt64(&i.id, 0) } -func (i *idGenerator) genID() int64 { +func (i *IDGenerator) genID() int64 { return atomic.AddInt64(&i.id, 1) } diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/channelz/logging.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/channelz/logging.go index 8e13a3d2ce7..f89e6f77bbd 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/channelz/logging.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/channelz/logging.go @@ -31,7 +31,7 @@ func withParens(id *Identifier) string { } // Info logs and adds a trace event if channelz is on. -func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtInfo, @@ -39,7 +39,7 @@ func Info(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Infof logs and adds a trace event if channelz is on. -func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtInfo, @@ -47,7 +47,7 @@ func Infof(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...inter } // Warning logs and adds a trace event if channelz is on. -func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtWarning, @@ -55,7 +55,7 @@ func Warning(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Warningf logs and adds a trace event if channelz is on. 
-func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtWarning, @@ -63,7 +63,7 @@ func Warningf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...in } // Error logs and adds a trace event if channelz is on. -func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { +func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprint(args...), Severity: CtError, @@ -71,7 +71,7 @@ func Error(l grpclog.DepthLoggerV2, id *Identifier, args ...interface{}) { } // Errorf logs and adds a trace event if channelz is on. -func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...interface{}) { +func Errorf(l grpclog.DepthLoggerV2, id *Identifier, format string, args ...any) { AddTraceEvent(l, id, 1, &TraceEventDesc{ Desc: fmt.Sprintf(format, args...), Severity: CtError, diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/channelz/types.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/channelz/types.go index 7b2f350e2e6..1d4020f5379 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/channelz/types.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/channelz/types.go @@ -628,6 +628,7 @@ type tracedChannel interface { type channelTrace struct { cm *channelMap + clearCalled bool createdTime time.Time eventCount int64 mu sync.Mutex @@ -656,6 +657,10 @@ func (c *channelTrace) append(e *TraceEvent) { } func (c *channelTrace) clear() { + if c.clearCalled { + return + } + c.clearCalled = true c.mu.Lock() for _, e := range c.events { if e.RefID != 0 { diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/channelz/util_linux.go 
b/.ci/providerlint/vendor/google.golang.org/grpc/internal/channelz/util_linux.go index 8d194e44e1d..98288c3f866 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/channelz/util_linux.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/channelz/util_linux.go @@ -23,7 +23,7 @@ import ( ) // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(socket interface{}) *SocketOptionData { +func GetSocketOption(socket any) *SocketOptionData { c, ok := socket.(syscall.Conn) if !ok { return nil diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go index 837ddc40240..b5568b22e20 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/channelz/util_nonlinux.go @@ -22,6 +22,6 @@ package channelz // GetSocketOption gets the socket option info of the conn. -func GetSocketOption(c interface{}) *SocketOptionData { +func GetSocketOption(c any) *SocketOptionData { return nil } diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/credentials/credentials.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/credentials/credentials.go index 32c9b59033c..9deee7f6513 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/credentials/credentials.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/credentials/credentials.go @@ -25,12 +25,12 @@ import ( type requestInfoKey struct{} // NewRequestInfoContext creates a context with ri. -func NewRequestInfoContext(ctx context.Context, ri interface{}) context.Context { +func NewRequestInfoContext(ctx context.Context, ri any) context.Context { return context.WithValue(ctx, requestInfoKey{}, ri) } // RequestInfoFromContext extracts the RequestInfo from ctx. 
-func RequestInfoFromContext(ctx context.Context) interface{} { +func RequestInfoFromContext(ctx context.Context) any { return ctx.Value(requestInfoKey{}) } @@ -39,11 +39,11 @@ func RequestInfoFromContext(ctx context.Context) interface{} { type clientHandshakeInfoKey struct{} // ClientHandshakeInfoFromContext extracts the ClientHandshakeInfo from ctx. -func ClientHandshakeInfoFromContext(ctx context.Context) interface{} { +func ClientHandshakeInfoFromContext(ctx context.Context) any { return ctx.Value(clientHandshakeInfoKey{}) } // NewClientHandshakeInfoContext creates a context with chi. -func NewClientHandshakeInfoContext(ctx context.Context, chi interface{}) context.Context { +func NewClientHandshakeInfoContext(ctx context.Context, chi any) context.Context { return context.WithValue(ctx, clientHandshakeInfoKey{}, chi) } diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go index 77c2c0b89f6..685a3cb41b1 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go @@ -36,10 +36,10 @@ var ( // "GRPC_RING_HASH_CAP". This does not override the default bounds // checking which NACKs configs specifying ring sizes > 8*1024*1024 (~8M). RingHashCap = uint64FromEnv("GRPC_RING_HASH_CAP", 4096, 1, 8*1024*1024) - // PickFirstLBConfig is set if we should support configuration of the - // pick_first LB policy, which can be enabled by setting the environment - // variable "GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG" to "true". - PickFirstLBConfig = boolFromEnv("GRPC_EXPERIMENTAL_PICKFIRST_LB_CONFIG", false) + // LeastRequestLB is set if we should support the least_request_experimental + // LB policy, which can be enabled by setting the environment variable + // "GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST" to "true". 
+ LeastRequestLB = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_LEAST_REQUEST", false) // ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS // handshakes that can be performed. ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100) diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 02b4b6a1c10..29f234acb1b 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -50,46 +50,7 @@ var ( // // When both bootstrap FileName and FileContent are set, FileName is used. XDSBootstrapFileContent = os.Getenv(XDSBootstrapFileContentEnv) - // XDSRingHash indicates whether ring hash support is enabled, which can be - // disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH" to "false". - XDSRingHash = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_RING_HASH", true) - // XDSClientSideSecurity is used to control processing of security - // configuration on the client-side. - // - // Note that there is no env var protection for the server-side because we - // have a brand new API on the server-side and users explicitly need to use - // the new API to get security integration on the server. - XDSClientSideSecurity = boolFromEnv("GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT", true) - // XDSAggregateAndDNS indicates whether processing of aggregated cluster and - // DNS cluster is enabled, which can be disabled by setting the environment - // variable "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" - // to "false". 
- XDSAggregateAndDNS = boolFromEnv("GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER", true) - - // XDSRBAC indicates whether xDS configured RBAC HTTP Filter is enabled, - // which can be disabled by setting the environment variable - // "GRPC_XDS_EXPERIMENTAL_RBAC" to "false". - XDSRBAC = boolFromEnv("GRPC_XDS_EXPERIMENTAL_RBAC", true) - // XDSOutlierDetection indicates whether outlier detection support is - // enabled, which can be disabled by setting the environment variable - // "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "false". - XDSOutlierDetection = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION", true) - // XDSFederation indicates whether federation support is enabled, which can - // be enabled by setting the environment variable - // "GRPC_EXPERIMENTAL_XDS_FEDERATION" to "true". - XDSFederation = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FEDERATION", true) - - // XDSRLS indicates whether processing of Cluster Specifier plugins and - // support for the RLS CLuster Specifier is enabled, which can be disabled by - // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to - // "false". - XDSRLS = boolFromEnv("GRPC_EXPERIMENTAL_XDS_RLS_LB", true) // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv("GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI") - // XDSCustomLBPolicy indicates whether Custom LB Policies are enabled, which - // can be disabled by setting the environment variable - // "GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG" to "false". 
- XDSCustomLBPolicy = boolFromEnv("GRPC_EXPERIMENTAL_XDS_CUSTOM_LB_CONFIG", true) ) diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/experimental.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/experimental.go new file mode 100644 index 00000000000..7f7044e1731 --- /dev/null +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/experimental.go @@ -0,0 +1,28 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +var ( + // WithRecvBufferPool is implemented by the grpc package and returns a dial + // option to configure a shared buffer pool for a grpc.ClientConn. + WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption + + // RecvBufferPool is implemented by the grpc package and returns a server + // option to configure a shared buffer pool for a grpc.Server. + RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption +) diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index b68e26a3649..bfc45102ab2 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -30,7 +30,7 @@ var Logger LoggerV2 var DepthLogger DepthLoggerV2 // InfoDepth logs to the INFO log at the specified depth. 
-func InfoDepth(depth int, args ...interface{}) { +func InfoDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.InfoDepth(depth, args...) } else { @@ -39,7 +39,7 @@ func InfoDepth(depth int, args ...interface{}) { } // WarningDepth logs to the WARNING log at the specified depth. -func WarningDepth(depth int, args ...interface{}) { +func WarningDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.WarningDepth(depth, args...) } else { @@ -48,7 +48,7 @@ func WarningDepth(depth int, args ...interface{}) { } // ErrorDepth logs to the ERROR log at the specified depth. -func ErrorDepth(depth int, args ...interface{}) { +func ErrorDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.ErrorDepth(depth, args...) } else { @@ -57,7 +57,7 @@ func ErrorDepth(depth int, args ...interface{}) { } // FatalDepth logs to the FATAL log at the specified depth. -func FatalDepth(depth int, args ...interface{}) { +func FatalDepth(depth int, args ...any) { if DepthLogger != nil { DepthLogger.FatalDepth(depth, args...) } else { @@ -71,35 +71,35 @@ func FatalDepth(depth int, args ...interface{}) { // is defined here to avoid a circular dependency. type LoggerV2 interface { // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. - Info(args ...interface{}) + Info(args ...any) // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. - Infoln(args ...interface{}) + Infoln(args ...any) // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. - Infof(format string, args ...interface{}) + Infof(format string, args ...any) // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. - Warning(args ...interface{}) + Warning(args ...any) // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. - Warningln(args ...interface{}) + Warningln(args ...any) // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. 
- Warningf(format string, args ...interface{}) + Warningf(format string, args ...any) // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. - Error(args ...interface{}) + Error(args ...any) // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. - Errorln(args ...interface{}) + Errorln(args ...any) // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. - Errorf(format string, args ...interface{}) + Errorf(format string, args ...any) // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatal(args ...interface{}) + Fatal(args ...any) // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalln(args ...interface{}) + Fatalln(args ...any) // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. // gRPC ensures that all Fatal logs will exit with os.Exit(1). // Implementations may also call os.Exit() with a non-zero exit code. - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) // V reports whether verbosity level l is at least the requested verbose level. V(l int) bool } @@ -116,11 +116,11 @@ type LoggerV2 interface { // later release. type DepthLoggerV2 interface { // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. - InfoDepth(depth int, args ...interface{}) + InfoDepth(depth int, args ...any) // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. - WarningDepth(depth int, args ...interface{}) + WarningDepth(depth int, args ...any) // ErrorDepth logs to ERROR log at the specified depth. 
Arguments are handled in the manner of fmt.Println. - ErrorDepth(depth int, args ...interface{}) + ErrorDepth(depth int, args ...any) // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. - FatalDepth(depth int, args ...interface{}) + FatalDepth(depth int, args ...any) } diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go index 02224b42ca8..faa998de763 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/grpclog/prefixLogger.go @@ -31,7 +31,7 @@ type PrefixLogger struct { } // Infof does info logging. -func (pl *PrefixLogger) Infof(format string, args ...interface{}) { +func (pl *PrefixLogger) Infof(format string, args ...any) { if pl != nil { // Handle nil, so the tests can pass in a nil logger. format = pl.prefix + format @@ -42,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...interface{}) { } // Warningf does warning logging. -func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { +func (pl *PrefixLogger) Warningf(format string, args ...any) { if pl != nil { format = pl.prefix + format pl.logger.WarningDepth(1, fmt.Sprintf(format, args...)) @@ -52,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...interface{}) { } // Errorf does error logging. -func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { +func (pl *PrefixLogger) Errorf(format string, args ...any) { if pl != nil { format = pl.prefix + format pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...)) @@ -62,7 +62,7 @@ func (pl *PrefixLogger) Errorf(format string, args ...interface{}) { } // Debugf does info logging at verbose level 2. 
-func (pl *PrefixLogger) Debugf(format string, args ...interface{}) { +func (pl *PrefixLogger) Debugf(format string, args ...any) { // TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe // rewrite PrefixLogger a little to ensure that we don't use the global // `Logger` here, and instead use the `logger` field. diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go index 37b8d4117e7..f7f40a16ace 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/grpcsync/callback_serializer.go @@ -20,7 +20,6 @@ package grpcsync import ( "context" - "sync" "google.golang.org/grpc/internal/buffer" ) @@ -32,14 +31,12 @@ import ( // // This type is safe for concurrent access. type CallbackSerializer struct { - // Done is closed once the serializer is shut down completely, i.e all + // done is closed once the serializer is shut down completely, i.e all // scheduled callbacks are executed and the serializer has deallocated all // its resources. - Done chan struct{} + done chan struct{} callbacks *buffer.Unbounded - closedMu sync.Mutex - closed bool } // NewCallbackSerializer returns a new CallbackSerializer instance. The provided @@ -48,12 +45,12 @@ type CallbackSerializer struct { // callbacks will be added once this context is canceled, and any pending un-run // callbacks will be executed before the serializer is shut down. func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { - t := &CallbackSerializer{ - Done: make(chan struct{}), + cs := &CallbackSerializer{ + done: make(chan struct{}), callbacks: buffer.NewUnbounded(), } - go t.run(ctx) - return t + go cs.run(ctx) + return cs } // Schedule adds a callback to be scheduled after existing callbacks are run. 
@@ -64,56 +61,40 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer { // Return value indicates if the callback was successfully added to the list of // callbacks to be executed by the serializer. It is not possible to add // callbacks once the context passed to NewCallbackSerializer is cancelled. -func (t *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { - t.closedMu.Lock() - defer t.closedMu.Unlock() - - if t.closed { - return false - } - t.callbacks.Put(f) - return true +func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool { + return cs.callbacks.Put(f) == nil } -func (t *CallbackSerializer) run(ctx context.Context) { - var backlog []func(context.Context) +func (cs *CallbackSerializer) run(ctx context.Context) { + defer close(cs.done) - defer close(t.Done) + // TODO: when Go 1.21 is the oldest supported version, this loop and Close + // can be replaced with: + // + // context.AfterFunc(ctx, cs.callbacks.Close) for ctx.Err() == nil { select { case <-ctx.Done(): // Do nothing here. Next iteration of the for loop will not happen, // since ctx.Err() would be non-nil. - case callback, ok := <-t.callbacks.Get(): - if !ok { - return - } - t.callbacks.Load() - callback.(func(ctx context.Context))(ctx) + case cb := <-cs.callbacks.Get(): + cs.callbacks.Load() + cb.(func(context.Context))(ctx) } } - // Fetch pending callbacks if any, and execute them before returning from - // this method and closing t.Done. - t.closedMu.Lock() - t.closed = true - backlog = t.fetchPendingCallbacks() - t.callbacks.Close() - t.closedMu.Unlock() - for _, b := range backlog { - b(ctx) + // Close the buffer to prevent new callbacks from being added. + cs.callbacks.Close() + + // Run all pending callbacks. 
+ for cb := range cs.callbacks.Get() { + cs.callbacks.Load() + cb.(func(context.Context))(ctx) } } -func (t *CallbackSerializer) fetchPendingCallbacks() []func(context.Context) { - var backlog []func(context.Context) - for { - select { - case b := <-t.callbacks.Get(): - backlog = append(backlog, b.(func(context.Context))) - t.callbacks.Load() - default: - return backlog - } - } +// Done returns a channel that is closed after the context passed to +// NewCallbackSerializer is canceled and all callbacks have been executed. +func (cs *CallbackSerializer) Done() <-chan struct{} { + return cs.done } diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go index f58b5ffa6b1..aef8cec1ab0 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/grpcsync/pubsub.go @@ -29,7 +29,7 @@ import ( type Subscriber interface { // OnMessage is invoked when a new message is published. Implementations // must not block in this method. - OnMessage(msg interface{}) + OnMessage(msg any) } // PubSub is a simple one-to-many publish-subscribe system that supports @@ -40,25 +40,23 @@ type Subscriber interface { // subscribers interested in receiving these messages register a callback // via the Subscribe() method. // -// Once a PubSub is stopped, no more messages can be published, and -// it is guaranteed that no more subscriber callback will be invoked. +// Once a PubSub is stopped, no more messages can be published, but any pending +// published messages will be delivered to the subscribers. Done may be used +// to determine when all published messages have been delivered. type PubSub struct { - cs *CallbackSerializer - cancel context.CancelFunc + cs *CallbackSerializer // Access to the below fields are guarded by this mutex. 
mu sync.Mutex - msg interface{} + msg any subscribers map[Subscriber]bool - stopped bool } -// NewPubSub returns a new PubSub instance. -func NewPubSub() *PubSub { - ctx, cancel := context.WithCancel(context.Background()) +// NewPubSub returns a new PubSub instance. Users should cancel the +// provided context to shutdown the PubSub. +func NewPubSub(ctx context.Context) *PubSub { return &PubSub{ cs: NewCallbackSerializer(ctx), - cancel: cancel, subscribers: map[Subscriber]bool{}, } } @@ -75,10 +73,6 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { ps.mu.Lock() defer ps.mu.Unlock() - if ps.stopped { - return func() {} - } - ps.subscribers[sub] = true if ps.msg != nil { @@ -102,14 +96,10 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) { // Publish publishes the provided message to the PubSub, and invokes // callbacks registered by subscribers asynchronously. -func (ps *PubSub) Publish(msg interface{}) { +func (ps *PubSub) Publish(msg any) { ps.mu.Lock() defer ps.mu.Unlock() - if ps.stopped { - return - } - ps.msg = msg for sub := range ps.subscribers { s := sub @@ -124,13 +114,8 @@ func (ps *PubSub) Publish(msg interface{}) { } } -// Stop shuts down the PubSub and releases any resources allocated by it. -// It is guaranteed that no subscriber callbacks would be invoked once this -// method returns. -func (ps *PubSub) Stop() { - ps.mu.Lock() - defer ps.mu.Unlock() - ps.stopped = true - - ps.cancel() +// Done returns a channel that is closed after the context passed to NewPubSub +// is canceled and all updates have been sent to subscribers. 
+func (ps *PubSub) Done() <-chan struct{} { + return ps.cs.Done() } diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/idle/idle.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/idle/idle.go new file mode 100644 index 00000000000..fe49cb74c55 --- /dev/null +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/idle/idle.go @@ -0,0 +1,278 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package idle contains a component for managing idleness (entering and exiting) +// based on RPC activity. +package idle + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" +) + +// For overriding in unit tests. +var timeAfterFunc = func(d time.Duration, f func()) *time.Timer { + return time.AfterFunc(d, f) +} + +// Enforcer is the functionality provided by grpc.ClientConn to enter +// and exit from idle mode. +type Enforcer interface { + ExitIdleMode() error + EnterIdleMode() +} + +// Manager implements idleness detection and calls the configured Enforcer to +// enter/exit idle mode when appropriate. Must be created by NewManager. +type Manager struct { + // State accessed atomically. + lastCallEndTime int64 // Unix timestamp in nanos; time when the most recent RPC completed. + activeCallsCount int32 // Count of active RPCs; -math.MaxInt32 means channel is idle or is trying to get there. 
+ activeSinceLastTimerCheck int32 // Boolean; True if there was an RPC since the last timer callback. + closed int32 // Boolean; True when the manager is closed. + + // Can be accessed without atomics or mutex since these are set at creation + // time and read-only after that. + enforcer Enforcer // Functionality provided by grpc.ClientConn. + timeout time.Duration + + // idleMu is used to guarantee mutual exclusion in two scenarios: + // - Opposing intentions: + // - a: Idle timeout has fired and handleIdleTimeout() is trying to put + // the channel in idle mode because the channel has been inactive. + // - b: At the same time an RPC is made on the channel, and OnCallBegin() + // is trying to prevent the channel from going idle. + // - Competing intentions: + // - The channel is in idle mode and there are multiple RPCs starting at + // the same time, all trying to move the channel out of idle. Only one + // of them should succeed in doing so, while the other RPCs should + // piggyback on the first one and be successfully handled. + idleMu sync.RWMutex + actuallyIdle bool + timer *time.Timer +} + +// NewManager creates a new idleness manager implementation for the +// given idle timeout. It begins in idle mode. +func NewManager(enforcer Enforcer, timeout time.Duration) *Manager { + return &Manager{ + enforcer: enforcer, + timeout: timeout, + actuallyIdle: true, + activeCallsCount: -math.MaxInt32, + } +} + +// resetIdleTimerLocked resets the idle timer to the given duration. Called +// when exiting idle mode or when the timer fires and we need to reset it. +func (m *Manager) resetIdleTimerLocked(d time.Duration) { + if m.isClosed() || m.timeout == 0 || m.actuallyIdle { + return + } + + // It is safe to ignore the return value from Reset() because this method is + // only ever called from the timer callback or when exiting idle mode. 
+ if m.timer != nil { + m.timer.Stop() + } + m.timer = timeAfterFunc(d, m.handleIdleTimeout) +} + +func (m *Manager) resetIdleTimer(d time.Duration) { + m.idleMu.Lock() + defer m.idleMu.Unlock() + m.resetIdleTimerLocked(d) +} + +// handleIdleTimeout is the timer callback that is invoked upon expiry of the +// configured idle timeout. The channel is considered inactive if there are no +// ongoing calls and no RPC activity since the last time the timer fired. +func (m *Manager) handleIdleTimeout() { + if m.isClosed() { + return + } + + if atomic.LoadInt32(&m.activeCallsCount) > 0 { + m.resetIdleTimer(m.timeout) + return + } + + // There has been activity on the channel since we last got here. Reset the + // timer and return. + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // Set the timer to fire after a duration of idle timeout, calculated + // from the time the most recent RPC completed. + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 0) + m.resetIdleTimer(time.Duration(atomic.LoadInt64(&m.lastCallEndTime)-time.Now().UnixNano()) + m.timeout) + return + } + + // Now that we've checked that there has been no activity, attempt to enter + // idle mode, which is very likely to succeed. + if m.tryEnterIdleMode() { + // Successfully entered idle mode. No timer needed until we exit idle. + return + } + + // Failed to enter idle mode due to a concurrent RPC that kept the channel + // active, or because of an error from the channel. Undo the attempt to + // enter idle, and reset the timer to try again later. + m.resetIdleTimer(m.timeout) +} + +// tryEnterIdleMode instructs the channel to enter idle mode. But before +// that, it performs a last minute check to ensure that no new RPC has come in, +// making the channel active. +// +// Return value indicates whether or not the channel moved to idle mode. +// +// Holds idleMu which ensures mutual exclusion with exitIdleMode. 
+func (m *Manager) tryEnterIdleMode() bool { + // Setting the activeCallsCount to -math.MaxInt32 indicates to OnCallBegin() + // that the channel is either in idle mode or is trying to get there. + if !atomic.CompareAndSwapInt32(&m.activeCallsCount, 0, -math.MaxInt32) { + // This CAS operation can fail if an RPC started after we checked for + // activity in the timer handler, or one was ongoing from before the + // last time the timer fired, or if a test is attempting to enter idle + // mode without checking. In all cases, abort going into idle mode. + return false + } + // N.B. if we fail to enter idle mode after this, we must re-add + // math.MaxInt32 to m.activeCallsCount. + + m.idleMu.Lock() + defer m.idleMu.Unlock() + + if atomic.LoadInt32(&m.activeCallsCount) != -math.MaxInt32 { + // We raced and lost to a new RPC. Very rare, but stop entering idle. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + return false + } + if atomic.LoadInt32(&m.activeSinceLastTimerCheck) == 1 { + // A very short RPC could have come in (and also finished) after we + // checked for calls count and activity in handleIdleTimeout(), but + // before the CAS operation. So, we need to check for activity again. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + return false + } + + // No new RPCs have come in since we set the active calls count value to + // -math.MaxInt32. And since we have the lock, it is safe to enter idle mode + // unconditionally now. + m.enforcer.EnterIdleMode() + m.actuallyIdle = true + return true +} + +func (m *Manager) EnterIdleModeForTesting() { + m.tryEnterIdleMode() +} + +// OnCallBegin is invoked at the start of every RPC. +func (m *Manager) OnCallBegin() error { + if m.isClosed() { + return nil + } + + if atomic.AddInt32(&m.activeCallsCount, 1) > 0 { + // Channel is not idle now. Set the activity bit and allow the call. 
+ atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) + return nil + } + + // Channel is either in idle mode or is in the process of moving to idle + // mode. Attempt to exit idle mode to allow this RPC. + if err := m.ExitIdleMode(); err != nil { + // Undo the increment to calls count, and return an error causing the + // RPC to fail. + atomic.AddInt32(&m.activeCallsCount, -1) + return err + } + + atomic.StoreInt32(&m.activeSinceLastTimerCheck, 1) + return nil +} + +// ExitIdleMode instructs m to call the enforcer's ExitIdleMode and update m's +// internal state. +func (m *Manager) ExitIdleMode() error { + // Holds idleMu which ensures mutual exclusion with tryEnterIdleMode. + m.idleMu.Lock() + defer m.idleMu.Unlock() + + if m.isClosed() || !m.actuallyIdle { + // This can happen in three scenarios: + // - handleIdleTimeout() set the calls count to -math.MaxInt32 and called + // tryEnterIdleMode(). But before the latter could grab the lock, an RPC + // came in and OnCallBegin() noticed that the calls count is negative. + // - Channel is in idle mode, and multiple new RPCs come in at the same + // time, all of them notice a negative calls count in OnCallBegin and get + // here. The first one to get the lock would got the channel to exit idle. + // - Channel is not in idle mode, and the user calls Connect which calls + // m.ExitIdleMode. + // + // In any case, there is nothing to do here. + return nil + } + + if err := m.enforcer.ExitIdleMode(); err != nil { + return fmt.Errorf("failed to exit idle mode: %w", err) + } + + // Undo the idle entry process. This also respects any new RPC attempts. + atomic.AddInt32(&m.activeCallsCount, math.MaxInt32) + m.actuallyIdle = false + + // Start a new timer to fire after the configured idle timeout. + m.resetIdleTimerLocked(m.timeout) + return nil +} + +// OnCallEnd is invoked at the end of every RPC. +func (m *Manager) OnCallEnd() { + if m.isClosed() { + return + } + + // Record the time at which the most recent call finished. 
+ atomic.StoreInt64(&m.lastCallEndTime, time.Now().UnixNano()) + + // Decrement the active calls count. This count can temporarily go negative + // when the timer callback is in the process of moving the channel to idle + // mode, but one or more RPCs come in and complete before the timer callback + // can get done with the process of moving to idle mode. + atomic.AddInt32(&m.activeCallsCount, -1) +} + +func (m *Manager) isClosed() bool { + return atomic.LoadInt32(&m.closed) == 1 +} + +func (m *Manager) Close() { + atomic.StoreInt32(&m.closed, 1) + + m.idleMu.Lock() + if m.timer != nil { + m.timer.Stop() + m.timer = nil + } + m.idleMu.Unlock() +} diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/internal.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/internal.go index 42ff39c8444..2549fe8e3b8 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/internal.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/internal.go @@ -30,7 +30,7 @@ import ( var ( // WithHealthCheckFunc is set by dialoptions.go - WithHealthCheckFunc interface{} // func (HealthChecker) DialOption + WithHealthCheckFunc any // func (HealthChecker) DialOption // HealthCheckFunc is used to provide client-side LB channel health checking HealthCheckFunc HealthChecker // BalancerUnregister is exported by package balancer to unregister a balancer. @@ -38,8 +38,12 @@ var ( // KeepaliveMinPingTime is the minimum ping interval. This must be 10s by // default, but tests may wish to set it lower for convenience. KeepaliveMinPingTime = 10 * time.Second + // KeepaliveMinServerPingTime is the minimum ping interval for servers. + // This must be 1s by default, but tests may wish to set it lower for + // convenience. + KeepaliveMinServerPingTime = time.Second // ParseServiceConfig parses a JSON representation of the service config. 
- ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult + ParseServiceConfig any // func(string) *serviceconfig.ParseResult // EqualServiceConfigForTesting is for testing service config generation and // parsing. Both a and b should be returned by ParseServiceConfig. // This function compares the config without rawJSON stripped, in case the @@ -49,33 +53,38 @@ var ( // given name. This is set by package certprovider for use from xDS // bootstrap code while parsing certificate provider configs in the // bootstrap file. - GetCertificateProviderBuilder interface{} // func(string) certprovider.Builder + GetCertificateProviderBuilder any // func(string) certprovider.Builder // GetXDSHandshakeInfoForTesting returns a pointer to the xds.HandshakeInfo // stored in the passed in attributes. This is set by // credentials/xds/xds.go. - GetXDSHandshakeInfoForTesting interface{} // func (*attributes.Attributes) *xds.HandshakeInfo + GetXDSHandshakeInfoForTesting any // func (*attributes.Attributes) *xds.HandshakeInfo // GetServerCredentials returns the transport credentials configured on a // gRPC server. An xDS-enabled server needs to know what type of credentials // is configured on the underlying gRPC server. This is set by server.go. - GetServerCredentials interface{} // func (*grpc.Server) credentials.TransportCredentials + GetServerCredentials any // func (*grpc.Server) credentials.TransportCredentials // CanonicalString returns the canonical string of the code defined here: // https://github.com/grpc/grpc/blob/master/doc/statuscodes.md. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - CanonicalString interface{} // func (codes.Code) string + CanonicalString any // func (codes.Code) string // DrainServerTransports initiates a graceful close of existing connections // on a gRPC server accepted on the provided listener address. 
An // xDS-enabled server invokes this method on a grpc.Server when a particular // listener moves to "not-serving" mode. - DrainServerTransports interface{} // func(*grpc.Server, string) + DrainServerTransports any // func(*grpc.Server, string) + // IsRegisteredMethod returns whether the passed in method is registered as + // a method on the server. + IsRegisteredMethod any // func(*grpc.Server, string) bool + // ServerFromContext returns the server from the context. + ServerFromContext any // func(context.Context) *grpc.Server // AddGlobalServerOptions adds an array of ServerOption that will be // effective globally for newly created servers. The priority will be: 1. // user-provided; 2. this method; 3. default values. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - AddGlobalServerOptions interface{} // func(opt ...ServerOption) + AddGlobalServerOptions any // func(opt ...ServerOption) // ClearGlobalServerOptions clears the array of extra ServerOption. This // method is useful in testing and benchmarking. // @@ -88,14 +97,14 @@ var ( // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - AddGlobalDialOptions interface{} // func(opt ...DialOption) + AddGlobalDialOptions any // func(opt ...DialOption) // DisableGlobalDialOptions returns a DialOption that prevents the // ClientConn from applying the global DialOptions (set via // AddGlobalDialOptions). // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - DisableGlobalDialOptions interface{} // func() grpc.DialOption + DisableGlobalDialOptions any // func() grpc.DialOption // ClearGlobalDialOptions clears the array of extra DialOption. This // method is useful in testing and benchmarking. // @@ -104,23 +113,26 @@ var ( ClearGlobalDialOptions func() // JoinDialOptions combines the dial options passed as arguments into a // single dial option. 
- JoinDialOptions interface{} // func(...grpc.DialOption) grpc.DialOption + JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption // JoinServerOptions combines the server options passed as arguments into a // single server option. - JoinServerOptions interface{} // func(...grpc.ServerOption) grpc.ServerOption + JoinServerOptions any // func(...grpc.ServerOption) grpc.ServerOption // WithBinaryLogger returns a DialOption that specifies the binary logger // for a ClientConn. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - WithBinaryLogger interface{} // func(binarylog.Logger) grpc.DialOption + WithBinaryLogger any // func(binarylog.Logger) grpc.DialOption // BinaryLogger returns a ServerOption that can set the binary logger for a // server. // // This is used in the 1.0 release of gcp/observability, and thus must not be // deleted or changed. - BinaryLogger interface{} // func(binarylog.Logger) grpc.ServerOption + BinaryLogger any // func(binarylog.Logger) grpc.ServerOption + + // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn + SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber) // NewXDSResolverWithConfigForTesting creates a new xds resolver builder using // the provided xds bootstrap config instead of the global configuration from @@ -131,7 +143,7 @@ var ( // // This function should ONLY be used for testing and may not work with some // other features, including the CSDS service. 
- NewXDSResolverWithConfigForTesting interface{} // func([]byte) (resolver.Builder, error) + NewXDSResolverWithConfigForTesting any // func([]byte) (resolver.Builder, error) // RegisterRLSClusterSpecifierPluginForTesting registers the RLS Cluster // Specifier Plugin for testing purposes, regardless of the XDSRLS environment @@ -163,7 +175,19 @@ var ( UnregisterRBACHTTPFilterForTesting func() // ORCAAllowAnyMinReportingInterval is for examples/orca use ONLY. - ORCAAllowAnyMinReportingInterval interface{} // func(so *orca.ServiceOptions) + ORCAAllowAnyMinReportingInterval any // func(so *orca.ServiceOptions) + + // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra + // metadata to RPCs. + GRPCResolverSchemeExtraMetadata string = "xds" + + // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. + EnterIdleModeForTesting any // func(*grpc.ClientConn) + + // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. + ExitIdleModeForTesting any // func(*grpc.ClientConn) error + + ChannelzTurnOffForTesting func() ) // HealthChecker defines the signature of the client-side LB channel health checking function. @@ -174,7 +198,7 @@ var ( // // The health checking protocol is defined at: // https://github.com/grpc/grpc/blob/master/doc/health-checking.md -type HealthChecker func(ctx context.Context, newStream func(string) (interface{}, error), setConnectivityState func(connectivity.State, error), serviceName string) error +type HealthChecker func(ctx context.Context, newStream func(string) (any, error), setConnectivityState func(connectivity.State, error), serviceName string) error const ( // CredsBundleModeFallback switches GoogleDefaultCreds to fallback mode. 
diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/metadata/metadata.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/metadata/metadata.go index c82e608e077..900bfb71608 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/metadata/metadata.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/metadata/metadata.go @@ -35,7 +35,7 @@ const mdKey = mdKeyType("grpc.internal.address.metadata") type mdValue metadata.MD -func (m mdValue) Equal(o interface{}) bool { +func (m mdValue) Equal(o any) bool { om, ok := o.(mdValue) if !ok { return false diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/pretty/pretty.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/pretty/pretty.go index 0177af4b511..7033191375d 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/pretty/pretty.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/pretty/pretty.go @@ -35,7 +35,7 @@ const jsonIndent = " " // ToJSON marshals the input into a json string. // // If marshal fails, it falls back to fmt.Sprintf("%+v"). -func ToJSON(e interface{}) string { +func ToJSON(e any) string { switch ee := e.(type) { case protov1.Message: mm := jsonpb.Marshaler{Indent: jsonIndent} diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/resolver/config_selector.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/resolver/config_selector.go index c7a18a948ad..f0603871c93 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/resolver/config_selector.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/resolver/config_selector.go @@ -92,7 +92,7 @@ type ClientStream interface { // calling RecvMsg on the same stream at the same time, but it is not safe // to call SendMsg on the same stream in different goroutines. It is also // not safe to call CloseSend concurrently with SendMsg. 
- SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. On // any other error, the stream is aborted and the error contains the RPC @@ -101,7 +101,7 @@ type ClientStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // ClientInterceptor is an interceptor for gRPC client streams. diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go index 99e1e5b36c8..b66dcb21327 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go @@ -23,7 +23,6 @@ package dns import ( "context" "encoding/json" - "errors" "fmt" "net" "os" @@ -37,6 +36,7 @@ import ( "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/resolver/dns/internal" "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) @@ -47,15 +47,11 @@ var EnableSRVLookups = false var logger = grpclog.Component("dns") -// Globals to stub out in tests. TODO: Perhaps these two can be combined into a -// single variable for testing the resolver? 
-var ( - newTimer = time.NewTimer - newTimerDNSResRate = time.NewTimer -) - func init() { resolver.Register(NewBuilder()) + internal.TimeAfterFunc = time.After + internal.NewNetResolver = newNetResolver + internal.AddressDialer = addressDialer } const ( @@ -70,23 +66,6 @@ const ( txtAttribute = "grpc_config=" ) -var ( - errMissingAddr = errors.New("dns resolver: missing address") - - // Addresses ending with a colon that is supposed to be the separator - // between host and port is not allowed. E.g. "::" is a valid address as - // it is an IPv6 address (host only) and "[::]:" is invalid as it ends with - // a colon as the host and port separator - errEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") -) - -var ( - defaultResolver netResolver = net.DefaultResolver - // To prevent excessive re-resolution, we enforce a rate limit on DNS - // resolution requests. - minDNSResRate = 30 * time.Second -) - var addressDialer = func(address string) func(context.Context, string, string) (net.Conn, error) { return func(ctx context.Context, network, _ string) (net.Conn, error) { var dialer net.Dialer @@ -94,7 +73,11 @@ var addressDialer = func(address string) func(context.Context, string, string) ( } } -var newNetResolver = func(authority string) (netResolver, error) { +var newNetResolver = func(authority string) (internal.NetResolver, error) { + if authority == "" { + return net.DefaultResolver, nil + } + host, port, err := parseTarget(authority, defaultDNSSvrPort) if err != nil { return nil, err @@ -104,7 +87,7 @@ var newNetResolver = func(authority string) (netResolver, error) { return &net.Resolver{ PreferGo: true, - Dial: addressDialer(authorityWithPort), + Dial: internal.AddressDialer(authorityWithPort), }, nil } @@ -142,13 +125,9 @@ func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts disableServiceConfig: opts.DisableServiceConfig, } - if target.URL.Host == "" { - d.resolver = defaultResolver - } else { - 
d.resolver, err = newNetResolver(target.URL.Host) - if err != nil { - return nil, err - } + d.resolver, err = internal.NewNetResolver(target.URL.Host) + if err != nil { + return nil, err } d.wg.Add(1) @@ -161,12 +140,6 @@ func (b *dnsBuilder) Scheme() string { return "dns" } -type netResolver interface { - LookupHost(ctx context.Context, host string) (addrs []string, err error) - LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) - LookupTXT(ctx context.Context, name string) (txts []string, err error) -} - // deadResolver is a resolver that does nothing. type deadResolver struct{} @@ -178,7 +151,7 @@ func (deadResolver) Close() {} type dnsResolver struct { host string port string - resolver netResolver + resolver internal.NetResolver ctx context.Context cancel context.CancelFunc cc resolver.ClientConn @@ -223,29 +196,27 @@ func (d *dnsResolver) watcher() { err = d.cc.UpdateState(*state) } - var timer *time.Timer + var waitTime time.Duration if err == nil { // Success resolving, wait for the next ResolveNow. However, also wait 30 // seconds at the very least to prevent constantly re-resolving. backoffIndex = 1 - timer = newTimerDNSResRate(minDNSResRate) + waitTime = internal.MinResolutionRate select { case <-d.ctx.Done(): - timer.Stop() return case <-d.rn: } } else { // Poll on an error found in DNS Resolver or an error received from // ClientConn. 
- timer = newTimer(backoff.DefaultExponential.Backoff(backoffIndex)) + waitTime = backoff.DefaultExponential.Backoff(backoffIndex) backoffIndex++ } select { case <-d.ctx.Done(): - timer.Stop() return - case <-timer.C: + case <-internal.TimeAfterFunc(waitTime): } } } @@ -387,7 +358,7 @@ func formatIP(addr string) (addrIP string, ok bool) { // target: ":80" defaultPort: "443" returns host: "localhost", port: "80" func parseTarget(target, defaultPort string) (host, port string, err error) { if target == "" { - return "", "", errMissingAddr + return "", "", internal.ErrMissingAddr } if ip := net.ParseIP(target); ip != nil { // target is an IPv4 or IPv6(without brackets) address @@ -397,7 +368,7 @@ func parseTarget(target, defaultPort string) (host, port string, err error) { if port == "" { // If the port field is empty (target ends with colon), e.g. "[::1]:", // this is an error. - return "", "", errEndsWithColon + return "", "", internal.ErrEndsWithColon } // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port if host == "" { diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go new file mode 100644 index 00000000000..c7fc557d00c --- /dev/null +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go @@ -0,0 +1,70 @@ +/* + * + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains functionality internal to the dns resolver package. +package internal + +import ( + "context" + "errors" + "net" + "time" +) + +// NetResolver groups the methods on net.Resolver that are used by the DNS +// resolver implementation. This allows the default net.Resolver instance to be +// overidden from tests. +type NetResolver interface { + LookupHost(ctx context.Context, host string) (addrs []string, err error) + LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) + LookupTXT(ctx context.Context, name string) (txts []string, err error) +} + +var ( + // ErrMissingAddr is the error returned when building a DNS resolver when + // the provided target name is empty. + ErrMissingAddr = errors.New("dns resolver: missing address") + + // ErrEndsWithColon is the error returned when building a DNS resolver when + // the provided target name ends with a colon that is supposed to be the + // separator between host and port. E.g. "::" is a valid address as it is + // an IPv6 address (host only) and "[::]:" is invalid as it ends with a + // colon as the host and port separator + ErrEndsWithColon = errors.New("dns resolver: missing port after port-separator colon") +) + +// The following vars are overridden from tests. +var ( + // MinResolutionRate is the minimum rate at which re-resolutions are + // allowed. This helps to prevent excessive re-resolution. + MinResolutionRate = 30 * time.Second + + // TimeAfterFunc is used by the DNS resolver to wait for the given duration + // to elapse. In non-test code, this is implemented by time.After. In test + // code, this can be used to control the amount of time the resolver is + // blocked waiting for the duration to elapse. 
+ TimeAfterFunc func(time.Duration) <-chan time.Time + + // NewNetResolver returns the net.Resolver instance for the given target. + NewNetResolver func(string) (NetResolver, error) + + // AddressDialer is the dialer used to dial the DNS server. It accepts the + // Host portion of the URL corresponding to the user's dial target and + // returns a dial function. + AddressDialer func(address string) func(context.Context, string, string) (net.Conn, error) +) diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/status/status.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/status/status.go index b0ead4f54f8..03ef2fedd5c 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/status/status.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/status/status.go @@ -43,13 +43,41 @@ type Status struct { s *spb.Status } +// NewWithProto returns a new status including details from statusProto. This +// is meant to be used by the gRPC library only. +func NewWithProto(code codes.Code, message string, statusProto []string) *Status { + if len(statusProto) != 1 { + // No grpc-status-details bin header, or multiple; just ignore. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + st := &spb.Status{} + if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil { + // Probably not a google.rpc.Status proto; do not provide details. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + if st.Code == int32(code) { + // The codes match between the grpc-status header and the + // grpc-status-details-bin header; use the full details proto. + return &Status{s: st} + } + return &Status{ + s: &spb.Status{ + Code: int32(codes.Internal), + Message: fmt.Sprintf( + "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v", + code, message, st, + ), + }, + } +} + // New returns a Status representing c and msg. 
func New(c codes.Code, msg string) *Status { return &Status{s: &spb.Status{Code: int32(c), Message: msg}} } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { +func Newf(c codes.Code, format string, a ...any) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -64,7 +92,7 @@ func Err(c codes.Code, msg string) error { } // Errorf returns Error(c, fmt.Sprintf(format, a...)). -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return Err(c, fmt.Sprintf(format, a...)) } @@ -120,11 +148,11 @@ func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { // Details returns a slice of details messages attached to the status. // If a detail cannot be decoded, the error is returned in place of the detail. -func (s *Status) Details() []interface{} { +func (s *Status) Details() []any { if s == nil || s.s == nil { return nil } - details := make([]interface{}, 0, len(s.s.Details)) + details := make([]any, 0, len(s.s.Details)) for _, any := range s.s.Details { detail := &ptypes.DynamicAny{} if err := ptypes.UnmarshalAny(any, detail); err != nil { diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go new file mode 100644 index 00000000000..aeffd3e1c7b --- /dev/null +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/tcp_keepalive_nonunix.go @@ -0,0 +1,29 @@ +//go:build !unix + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" +) + +// NetDialerWithTCPKeepalive returns a vanilla net.Dialer on non-unix platforms. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{} +} diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go new file mode 100644 index 00000000000..078137b7fd7 --- /dev/null +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/tcp_keepalive_unix.go @@ -0,0 +1,54 @@ +//go:build unix + +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package internal + +import ( + "net" + "syscall" + "time" + + "golang.org/x/sys/unix" +) + +// NetDialerWithTCPKeepalive returns a net.Dialer that enables TCP keepalives on +// the underlying connection with OS default values for keepalive parameters. 
+// +// TODO: Once https://github.com/golang/go/issues/62254 lands, and the +// appropriate Go version becomes less than our least supported Go version, we +// should look into using the new API to make things more straightforward. +func NetDialerWithTCPKeepalive() *net.Dialer { + return &net.Dialer{ + // Setting a negative value here prevents the Go stdlib from overriding + // the values of TCP keepalive time and interval. It also prevents the + // Go stdlib from enabling TCP keepalives by default. + KeepAlive: time.Duration(-1), + // This method is called after the underlying network socket is created, + // but before dialing the socket (or calling its connect() method). The + // combination of unconditionally enabling TCP keepalives here, and + // disabling the overriding of TCP keepalive parameters by setting the + // KeepAlive field to a negative value above, results in OS defaults for + // the TCP keealive interval and time parameters. + Control: func(_, _ string, c syscall.RawConn) error { + return c.Control(func(fd uintptr) { + unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1) + }) + }, + } +} diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/controlbuf.go index be5a9c81eb9..b330ccedc8a 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/controlbuf.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/controlbuf.go @@ -40,7 +40,7 @@ var updateHeaderTblSize = func(e *hpack.Encoder, v uint32) { } type itemNode struct { - it interface{} + it any next *itemNode } @@ -49,7 +49,7 @@ type itemList struct { tail *itemNode } -func (il *itemList) enqueue(i interface{}) { +func (il *itemList) enqueue(i any) { n := &itemNode{it: i} if il.tail == nil { il.head, il.tail = n, n @@ -61,11 +61,11 @@ func (il *itemList) enqueue(i interface{}) { // peek returns the first item in the list without 
removing it from the // list. -func (il *itemList) peek() interface{} { +func (il *itemList) peek() any { return il.head.it } -func (il *itemList) dequeue() interface{} { +func (il *itemList) dequeue() any { if il.head == nil { return nil } @@ -336,7 +336,7 @@ func (c *controlBuffer) put(it cbItem) error { return err } -func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (bool, error) { +func (c *controlBuffer) executeAndPut(f func(it any) bool, it cbItem) (bool, error) { var wakeUp bool c.mu.Lock() if c.err != nil { @@ -373,7 +373,7 @@ func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it cbItem) (b } // Note argument f should never be nil. -func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bool, error) { +func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) { c.mu.Lock() if c.err != nil { c.mu.Unlock() @@ -387,7 +387,7 @@ func (c *controlBuffer) execute(f func(it interface{}) bool, it interface{}) (bo return true, nil } -func (c *controlBuffer) get(block bool) (interface{}, error) { +func (c *controlBuffer) get(block bool) (any, error) { for { c.mu.Lock() if c.err != nil { @@ -830,7 +830,7 @@ func (l *loopyWriter) goAwayHandler(g *goAway) error { return nil } -func (l *loopyWriter) handle(i interface{}) error { +func (l *loopyWriter) handle(i any) error { switch i := i.(type) { case *incomingWindowUpdate: l.incomingWindowUpdateHandler(i) diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 98f80e3fa00..a9d70e2a16c 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -75,11 +75,25 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s return nil, errors.New(msg) } + var 
localAddr net.Addr + if la := r.Context().Value(http.LocalAddrContextKey); la != nil { + localAddr, _ = la.(net.Addr) + } + var authInfo credentials.AuthInfo + if r.TLS != nil { + authInfo = credentials.TLSInfo{State: *r.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} + } + p := peer.Peer{ + Addr: strAddr(r.RemoteAddr), + LocalAddr: localAddr, + AuthInfo: authInfo, + } st := &serverHandlerTransport{ rw: w, req: r, closedCh: make(chan struct{}), writes: make(chan func()), + peer: p, contentType: contentType, contentSubtype: contentSubtype, stats: stats, @@ -134,6 +148,8 @@ type serverHandlerTransport struct { headerMD metadata.MD + peer peer.Peer + closeOnce sync.Once closedCh chan struct{} // closed on Close @@ -165,7 +181,13 @@ func (ht *serverHandlerTransport) Close(err error) { }) } -func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } +func (ht *serverHandlerTransport) Peer() *peer.Peer { + return &peer.Peer{ + Addr: ht.peer.Addr, + LocalAddr: ht.peer.LocalAddr, + AuthInfo: ht.peer.AuthInfo, + } +} // strAddr is a net.Addr backed by either a TCP "ip:port" string, or // the empty string if unknown. @@ -220,18 +242,20 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro h.Set("Grpc-Message", encodeGrpcMessage(m)) } + s.hdrMu.Lock() if p := st.Proto(); p != nil && len(p.Details) > 0 { + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. panic(err) } - h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes)) } - if md := s.Trailer(); len(md) > 0 { - for k, vv := range md { + if len(s.trailer) > 0 { + for k, vv := range s.trailer { // Clients don't tolerate reading restricted headers after some non restricted ones were sent. 
if isReservedHeader(k) { continue @@ -243,6 +267,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } + s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -287,7 +312,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { } // writeCustomHeaders sets custom headers set on the stream via SetHeader -// on the first write call (Write, WriteHeader, or WriteStatus). +// on the first write call (Write, WriteHeader, or WriteStatus) func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { h := ht.rw.Header() @@ -344,10 +369,8 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { +func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*Stream)) { // With this transport type there will be exactly 1 stream: this HTTP request. 
- - ctx := ht.req.Context() var cancel context.CancelFunc if ht.timeoutSet { ctx, cancel = context.WithTimeout(ctx, ht.timeout) @@ -367,34 +390,19 @@ func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), trace ht.Close(errors.New("request is done processing")) }() + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) req := ht.req - s := &Stream{ - id: 0, // irrelevant - requestRead: func(int) {}, - cancel: cancel, - buf: newRecvBuffer(), - st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - contentSubtype: ht.contentSubtype, - } - pr := &peer.Peer{ - Addr: ht.RemoteAddr(), - } - if req.TLS != nil { - pr.AuthInfo = credentials.TLSInfo{State: *req.TLS, CommonAuthInfo: credentials.CommonAuthInfo{SecurityLevel: credentials.PrivacyAndIntegrity}} - } - ctx = metadata.NewIncomingContext(ctx, ht.headerMD) - s.ctx = peer.NewContext(ctx, pr) - for _, sh := range ht.stats { - s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) - inHeader := &stats.InHeader{ - FullMethod: s.method, - RemoteAddr: ht.RemoteAddr(), - Compression: s.recvCompress, - } - sh.HandleRPC(s.ctx, inHeader) + id: 0, // irrelevant + ctx: ctx, + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + headerWireLength: 0, // won't have access to header wire length until golang/go#18997. 
} s.trReader = &transportReader{ reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}}, diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/http2_client.go index 326bf084800..59f67655a85 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -36,6 +36,7 @@ import ( "golang.org/x/net/http2/hpack" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/channelz" icredentials "google.golang.org/grpc/internal/credentials" "google.golang.org/grpc/internal/grpclog" @@ -43,7 +44,7 @@ import ( "google.golang.org/grpc/internal/grpcutil" imetadata "google.golang.org/grpc/internal/metadata" istatus "google.golang.org/grpc/internal/status" - "google.golang.org/grpc/internal/syscall" + isyscall "google.golang.org/grpc/internal/syscall" "google.golang.org/grpc/internal/transport/networktype" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" @@ -176,7 +177,7 @@ func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error if networkType == "tcp" && useProxy { return proxyDial(ctx, address, grpcUA) } - return (&net.Dialer{}).DialContext(ctx, networkType, address) + return internal.NetDialerWithTCPKeepalive().DialContext(ctx, networkType, address) } func isTemporary(err error) bool { @@ -262,7 +263,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts } keepaliveEnabled := false if kp.Time != infinity { - if err = syscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { + if err = isyscall.SetTCPUserTimeout(conn, kp.Timeout); err != nil { return nil, connectionErrorf(false, err, "transport: failed to set TCP_USER_TIMEOUT: %v", err) } 
keepaliveEnabled = true @@ -330,7 +331,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts readerDone: make(chan struct{}), writerDone: make(chan struct{}), goAway: make(chan struct{}), - framer: newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize), + framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize), fc: &trInFlow{limit: uint32(icwz)}, scheme: scheme, activeStreams: make(map[uint32]*Stream), @@ -493,8 +494,9 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { func (t *http2Client) getPeer() *peer.Peer { return &peer.Peer{ - Addr: t.remoteAddr, - AuthInfo: t.authInfo, // Can be nil + Addr: t.remoteAddr, + AuthInfo: t.authInfo, // Can be nil + LocalAddr: t.localAddr, } } @@ -762,7 +764,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, firstTry := true var ch chan struct{} transportDrainRequired := false - checkForStreamQuota := func(it interface{}) bool { + checkForStreamQuota := func(it any) bool { if t.streamQuota <= 0 { // Can go negative if server decreases it. if firstTry { t.waitingStreams++ @@ -800,7 +802,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } var hdrListSizeErr error - checkForHeaderListSize := func(it interface{}) bool { + checkForHeaderListSize := func(it any) bool { if t.maxSendHeaderListSize == nil { return true } @@ -815,7 +817,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, return true } for { - success, err := t.controlBuf.executeAndPut(func(it interface{}) bool { + success, err := t.controlBuf.executeAndPut(func(it any) bool { return checkForHeaderListSize(it) && checkForStreamQuota(it) }, hdr) if err != nil { @@ -927,7 +929,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2. 
rst: rst, rstCode: rstCode, } - addBackStreamQuota := func(interface{}) bool { + addBackStreamQuota := func(any) bool { t.streamQuota++ if t.streamQuota > 0 && t.waitingStreams > 0 { select { @@ -1080,7 +1082,7 @@ func (t *http2Client) updateWindow(s *Stream, n uint32) { // for the transport and the stream based on the current bdp // estimation. func (t *http2Client) updateFlowControl(n uint32) { - updateIWS := func(interface{}) bool { + updateIWS := func(any) bool { t.initialWindowSize = int32(n) t.mu.Lock() for _, s := range t.activeStreams { @@ -1233,7 +1235,7 @@ func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { } updateFuncs = append(updateFuncs, updateStreamQuota) } - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func(any) bool { for _, f := range updateFuncs { f() } @@ -1399,7 +1401,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { mdata = make(map[string][]string) contentTypeErr = "malformed header: missing HTTP content-type" grpcMessage string - statusGen *status.Status recvCompress string httpStatusCode *int httpStatusErr string @@ -1434,12 +1435,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { rawStatusCode = codes.Code(uint32(code)) case "grpc-message": grpcMessage = decodeGrpcMessage(hf.Value) - case "grpc-status-details-bin": - var err error - statusGen, err = decodeGRPCStatusDetails(hf.Value) - if err != nil { - headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) - } case ":status": if hf.Value == "200" { httpStatusErr = "" @@ -1505,14 +1500,15 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - isHeader := false - - // If headerChan hasn't been closed yet - if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { - s.headerValid = true - if !endStream { - // HEADERS frame block carries a Response-Headers. 
- isHeader = true + // For headers, set them in s.header and close headerChan. For trailers or + // trailers-only, closeStream will set the trailers and close headerChan as + // needed. + if !endStream { + // If headerChan hasn't been closed yet (expected, given we checked it + // above, but something else could have potentially closed the whole + // stream). + if atomic.CompareAndSwapUint32(&s.headerChanClosed, 0, 1) { + s.headerValid = true // These values can be set without any synchronization because // stream goroutine will read it only after seeing a closed // headerChan which we'll close after setting this. @@ -1520,15 +1516,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { if len(mdata) > 0 { s.header = mdata } - } else { - // HEADERS frame block carries a Trailers-Only. - s.noHeaders = true + close(s.headerChan) } - close(s.headerChan) } for _, sh := range t.statsHandlers { - if isHeader { + if !endStream { inHeader := &stats.InHeader{ Client: true, WireLength: int(frame.Header().Length), @@ -1550,13 +1543,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - if statusGen == nil { - statusGen = status.New(rawStatusCode, grpcMessage) - } + status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) - // if client received END_STREAM from server while stream was still active, send RST_STREAM - rst := s.getState() == streamActive - t.closeStream(s, io.EOF, rst, http2.ErrCodeNo, statusGen, mdata, true) + // If client received END_STREAM from server while stream was still active, + // send RST_STREAM. 
+ rstStream := s.getState() == streamActive + t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true) } // readServerPreface reads and handles the initial settings frame from the diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/http2_server.go index 0bc7a7d576d..680c9eba0b1 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -68,18 +68,15 @@ var serverConnectionCounter uint64 // http2Server implements the ServerTransport interface with HTTP2. type http2Server struct { - lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. - ctx context.Context - done chan struct{} - conn net.Conn - loopy *loopyWriter - readerDone chan struct{} // sync point to enable testing. - writerDone chan struct{} // sync point to enable testing. - remoteAddr net.Addr - localAddr net.Addr - authInfo credentials.AuthInfo // auth info about the connection - inTapHandle tap.ServerInHandle - framer *framer + lastRead int64 // Keep this field 64-bit aligned. Accessed atomically. + done chan struct{} + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + loopyWriterDone chan struct{} + peer peer.Peer + inTapHandle tap.ServerInHandle + framer *framer // The max number of concurrent streams. 
maxStreams uint32 // controlBuf delivers all the control related tasks (e.g., window @@ -165,7 +162,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, if config.MaxHeaderListSize != nil { maxHeaderListSize = *config.MaxHeaderListSize } - framer := newFramer(conn, writeBufSize, readBufSize, maxHeaderListSize) + framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize) // Send initial settings as connection preface to client. isettings := []http2.Setting{{ ID: http2.SettingMaxFrameSize, @@ -243,16 +240,18 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, } done := make(chan struct{}) + peer := peer.Peer{ + Addr: conn.RemoteAddr(), + LocalAddr: conn.LocalAddr(), + AuthInfo: authInfo, + } t := &http2Server{ - ctx: setConnection(context.Background(), rawConn), done: done, conn: conn, - remoteAddr: conn.RemoteAddr(), - localAddr: conn.LocalAddr(), - authInfo: authInfo, + peer: peer, framer: framer, readerDone: make(chan struct{}), - writerDone: make(chan struct{}), + loopyWriterDone: make(chan struct{}), maxStreams: config.MaxStreams, inTapHandle: config.InTapHandle, fc: &trInFlow{limit: uint32(icwz)}, @@ -267,8 +266,6 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, bufferPool: newBufferPool(), } t.logger = prefixLoggerForServerTransport(t) - // Add peer information to the http2server context. 
- t.ctx = peer.NewContext(t.ctx, t.getPeer()) t.controlBuf = newControlBuffer(t.done) if dynamicWindow { @@ -277,15 +274,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, updateFlowControl: t.updateFlowControl, } } - for _, sh := range t.stats { - t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{ - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - }) - connBegin := &stats.ConnBegin{} - sh.HandleConn(t.ctx, connBegin) - } - t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.remoteAddr, t.localAddr)) + t.channelzID, err = channelz.RegisterNormalSocket(t, config.ChannelzParentID, fmt.Sprintf("%s -> %s", t.peer.Addr, t.peer.LocalAddr)) if err != nil { return nil, err } @@ -334,7 +323,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger) t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler t.loopy.run() - close(t.writerDone) + close(t.loopyWriterDone) }() go t.keepalive() return t, nil @@ -342,7 +331,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. 
-func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { +func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeadersFrame, handle func(*Stream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -369,10 +358,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( buf := newRecvBuffer() s := &Stream{ - id: streamID, - st: t, - buf: buf, - fc: &inFlow{limit: uint32(t.initialWindowSize)}, + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + headerWireLength: int(frame.Header().Length), } var ( // if false, content-type was missing or invalid @@ -511,9 +501,9 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.state = streamReadDone } if timeoutSet { - s.ctx, s.cancel = context.WithTimeout(t.ctx, timeout) + s.ctx, s.cancel = context.WithTimeout(ctx, timeout) } else { - s.ctx, s.cancel = context.WithCancel(t.ctx) + s.ctx, s.cancel = context.WithCancel(ctx) } // Attach the received metadata to the context. 
@@ -561,7 +551,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if t.inTapHandle != nil { var err error - if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil { t.mu.Unlock() if t.logger.V(logLevel) { t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) @@ -592,19 +582,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) } - s.ctx = traceCtx(s.ctx, s.method) - for _, sh := range t.stats { - s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) - inHeader := &stats.InHeader{ - FullMethod: s.method, - RemoteAddr: t.remoteAddr, - LocalAddr: t.localAddr, - Compression: s.recvCompress, - WireLength: int(frame.Header().Length), - Header: mdata.Copy(), - } - sh.HandleRPC(s.ctx, inHeader) - } s.ctxDone = s.ctx.Done() s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) s.trReader = &transportReader{ @@ -630,8 +607,11 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. -func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { - defer close(t.readerDone) +func (t *http2Server) HandleStreams(ctx context.Context, handle func(*Stream)) { + defer func() { + <-t.loopyWriterDone + close(t.readerDone) + }() for { t.controlBuf.throttle() frame, err := t.framer.fr.ReadFrame() @@ -665,7 +645,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. 
} switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + if err := t.operateHeaders(ctx, frame, handle); err != nil { t.Close(err) break } @@ -850,7 +830,7 @@ func (t *http2Server) handleSettings(f *http2.SettingsFrame) { } return nil }) - t.controlBuf.executeAndPut(func(interface{}) bool { + t.controlBuf.executeAndPut(func(any) bool { for _, f := range updateFuncs { f() } @@ -934,7 +914,7 @@ func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) return headerFields } -func (t *http2Server) checkForHeaderListSize(it interface{}) bool { +func (t *http2Server) checkForHeaderListSize(it any) bool { if t.maxSendHeaderListSize == nil { return true } @@ -1053,12 +1033,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) if p := st.Proto(); p != nil && len(p.Details) > 0 { + // Do not use the user's grpc-status-details-bin (if present) if we are + // even attempting to set our own. + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) } else { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)}) } } @@ -1240,10 +1223,6 @@ func (t *http2Server) Close(err error) { for _, s := range streams { s.cancel() } - for _, sh := range t.stats { - connEnd := &stats.ConnEnd{} - sh.HandleConn(t.ctx, connEnd) - } } // deleteStream deletes the stream s from transport's active streams. 
@@ -1309,10 +1288,6 @@ func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, eo }) } -func (t *http2Server) RemoteAddr() net.Addr { - return t.remoteAddr -} - func (t *http2Server) Drain(debugData string) { t.mu.Lock() defer t.mu.Unlock() @@ -1395,11 +1370,11 @@ func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { LastMessageReceivedTimestamp: time.Unix(0, atomic.LoadInt64(&t.czData.lastMsgRecvTime)), LocalFlowControlWindow: int64(t.fc.getSize()), SocketOptions: channelz.GetSocketOption(t.conn), - LocalAddr: t.localAddr, - RemoteAddr: t.remoteAddr, + LocalAddr: t.peer.LocalAddr, + RemoteAddr: t.peer.Addr, // RemoteName : } - if au, ok := t.authInfo.(credentials.ChannelzSecurityInfo); ok { + if au, ok := t.peer.AuthInfo.(credentials.ChannelzSecurityInfo); ok { s.Security = au.GetSecurityValue() } s.RemoteFlowControlWindow = t.getOutFlowWindow() @@ -1431,10 +1406,12 @@ func (t *http2Server) getOutFlowWindow() int64 { } } -func (t *http2Server) getPeer() *peer.Peer { +// Peer returns the peer of the transport. +func (t *http2Server) Peer() *peer.Peer { return &peer.Peer{ - Addr: t.remoteAddr, - AuthInfo: t.authInfo, // Can be nil + Addr: t.peer.Addr, + LocalAddr: t.peer.LocalAddr, + AuthInfo: t.peer.AuthInfo, // Can be nil } } @@ -1459,6 +1436,6 @@ func GetConnection(ctx context.Context) net.Conn { // SetConnection adds the connection to the context to be able to get // information about the destination ip and port for an incoming RPC. This also // allows any unary or streaming interceptors to see the connection. 
-func setConnection(ctx context.Context, conn net.Conn) context.Context { +func SetConnection(ctx context.Context, conn net.Conn) context.Context { return context.WithValue(ctx, connectionKey{}, conn) } diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/http_util.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/http_util.go index 19cbb18f5ab..dc29d590e91 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -30,15 +30,13 @@ import ( "net/url" "strconv" "strings" + "sync" "time" "unicode/utf8" - "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) const ( @@ -87,6 +85,8 @@ var ( } ) +var grpcStatusDetailsBinHeader = "grpc-status-details-bin" + // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. @@ -102,7 +102,6 @@ func isReservedHeader(hdr string) bool { "grpc-message", "grpc-status", "grpc-timeout", - "grpc-status-details-bin", // Intentionally exclude grpc-previous-rpc-attempts and // grpc-retry-pushback-ms, which are "reserved", but their API // intentionally works via metadata. 
@@ -153,18 +152,6 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { - v, err := decodeBinHeader(rawDetails) - if err != nil { - return nil, err - } - st := &spb.Status{} - if err = proto.Unmarshal(v, st); err != nil { - return nil, err - } - return status.FromProto(st), nil -} - type timeoutUnit uint8 const ( @@ -309,6 +296,7 @@ func decodeGrpcMessageUnchecked(msg string) string { } type bufWriter struct { + pool *sync.Pool buf []byte offset int batchSize int @@ -316,12 +304,17 @@ type bufWriter struct { err error } -func newBufWriter(conn net.Conn, batchSize int) *bufWriter { - return &bufWriter{ - buf: make([]byte, batchSize*2), +func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter { + w := &bufWriter{ batchSize: batchSize, conn: conn, + pool: pool, + } + // this indicates that we should use non shared buf + if pool == nil { + w.buf = make([]byte, batchSize) } + return w } func (w *bufWriter) Write(b []byte) (n int, err error) { @@ -332,19 +325,34 @@ func (w *bufWriter) Write(b []byte) (n int, err error) { n, err = w.conn.Write(b) return n, toIOError(err) } + if w.buf == nil { + b := w.pool.Get().(*[]byte) + w.buf = *b + } for len(b) > 0 { nn := copy(w.buf[w.offset:], b) b = b[nn:] w.offset += nn n += nn if w.offset >= w.batchSize { - err = w.Flush() + err = w.flushKeepBuffer() } } return n, err } func (w *bufWriter) Flush() error { + err := w.flushKeepBuffer() + // Only release the buffer if we are in a "shared" mode + if w.buf != nil && w.pool != nil { + b := w.buf + w.pool.Put(&b) + w.buf = nil + } + return err +} + +func (w *bufWriter) flushKeepBuffer() error { if w.err != nil { return w.err } @@ -381,7 +389,10 @@ type framer struct { fr *http2.Framer } -func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderListSize uint32) *framer { +var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool) +var 
writeBufferMutex sync.Mutex + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer { if writeBufferSize < 0 { writeBufferSize = 0 } @@ -389,7 +400,11 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList if readBufferSize > 0 { r = bufio.NewReaderSize(r, readBufferSize) } - w := newBufWriter(conn, writeBufferSize) + var pool *sync.Pool + if sharedWriteBuffer { + pool = getWriteBufferPool(writeBufferSize) + } + w := newBufWriter(conn, writeBufferSize, pool) f := &framer{ writer: w, fr: http2.NewFramer(w, r), @@ -403,6 +418,24 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, maxHeaderList return f } +func getWriteBufferPool(writeBufferSize int) *sync.Pool { + writeBufferMutex.Lock() + defer writeBufferMutex.Unlock() + size := writeBufferSize * 2 + pool, ok := writeBufferPoolMap[size] + if ok { + return pool + } + pool = &sync.Pool{ + New: func() any { + b := make([]byte, size) + return &b + }, + } + writeBufferPoolMap[size] = pool + return pool +} + // parseDialTarget returns the network and address to pass to dialer. func parseDialTarget(target string) (string, string) { net := "tcp" diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/proxy.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/proxy.go index 41596198787..24fa1032574 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/proxy.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/proxy.go @@ -28,6 +28,8 @@ import ( "net/http" "net/http/httputil" "net/url" + + "google.golang.org/grpc/internal" ) const proxyAuthHeaderKey = "Proxy-Authorization" @@ -112,7 +114,7 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri // proxyDial dials, connecting to a proxy first if necessary. 
Checks if a proxy // is necessary, dials, does the HTTP CONNECT handshake, and returns the // connection. -func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, err error) { +func proxyDial(ctx context.Context, addr string, grpcUA string) (net.Conn, error) { newAddr := addr proxyURL, err := mapAddress(addr) if err != nil { @@ -122,15 +124,15 @@ func proxyDial(ctx context.Context, addr string, grpcUA string) (conn net.Conn, newAddr = proxyURL.Host } - conn, err = (&net.Dialer{}).DialContext(ctx, "tcp", newAddr) + conn, err := internal.NetDialerWithTCPKeepalive().DialContext(ctx, "tcp", newAddr) if err != nil { - return + return nil, err } - if proxyURL != nil { + if proxyURL == nil { // proxy is disabled if proxyURL is nil. - conn, err = doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) + return conn, err } - return + return doHTTPConnectHandshake(ctx, conn, addr, proxyURL, grpcUA) } func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/transport.go b/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/transport.go index aa1c896595d..b7b8fec1804 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -37,16 +37,13 @@ import ( "google.golang.org/grpc/internal/channelz" "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" "google.golang.org/grpc/resolver" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" "google.golang.org/grpc/tap" ) -// ErrNoHeaders is used as a signal that a trailers only response was received, -// and is not a real error. 
-var ErrNoHeaders = errors.New("stream has no headers") - const logLevel = 2 type bufferPool struct { @@ -56,7 +53,7 @@ type bufferPool struct { func newBufferPool() *bufferPool { return &bufferPool{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { return new(bytes.Buffer) }, }, @@ -269,7 +266,8 @@ type Stream struct { // headerValid indicates whether a valid header was received. Only // meaningful after headerChan is closed (always call waitOnHeader() before // reading its value). Not valid on server side. - headerValid bool + headerValid bool + headerWireLength int // Only set on server side. // hdrMu protects header and trailer metadata on the server-side. hdrMu sync.Mutex @@ -390,14 +388,10 @@ func (s *Stream) Header() (metadata.MD, error) { } s.waitOnHeader() - if !s.headerValid { + if !s.headerValid || s.noHeaders { return nil, s.status.Err() } - if s.noHeaders { - return nil, ErrNoHeaders - } - return s.header.Copy(), nil } @@ -433,6 +427,12 @@ func (s *Stream) Context() context.Context { return s.ctx } +// SetContext sets the context of the stream. This will be deleted once the +// stats handler callouts all move to gRPC layer. +func (s *Stream) SetContext(ctx context.Context) { + s.ctx = ctx +} + // Method returns the method for the stream. func (s *Stream) Method() string { return s.method @@ -445,6 +445,12 @@ func (s *Stream) Status() *status.Status { return s.status } +// HeaderWireLength returns the size of the headers of the stream as received +// from the wire. Valid only on the server. +func (s *Stream) HeaderWireLength() int { + return s.headerWireLength +} + // SetHeader sets the header metadata. This can be called multiple times. // Server side only. // This should not be called in parallel to other data writes. 
@@ -559,6 +565,7 @@ type ServerConfig struct { InitialConnWindowSize int32 WriteBufferSize int ReadBufferSize int + SharedWriteBuffer bool ChannelzParentID *channelz.Identifier MaxHeaderListSize *uint32 HeaderTableSize *uint32 @@ -592,6 +599,8 @@ type ConnectOptions struct { WriteBufferSize int // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. ReadBufferSize int + // SharedWriteBuffer indicates whether connections should reuse write buffer + SharedWriteBuffer bool // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. ChannelzParentID *channelz.Identifier // MaxHeaderListSize sets the max (uncompressed) size of header list that is prepared to be received. @@ -703,7 +712,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(func(*Stream), func(context.Context, string) context.Context) + HandleStreams(context.Context, func(*Stream)) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. @@ -722,8 +731,8 @@ type ServerTransport interface { // handlers will be terminated asynchronously. Close(err error) - // RemoteAddr returns the remote network address. - RemoteAddr() net.Addr + // Peer returns the peer of the server transport. + Peer() *peer.Peer // Drain notifies the client this ServerTransport stops accepting new RPCs. Drain(debugData string) @@ -736,7 +745,7 @@ type ServerTransport interface { } // connectionErrorf creates an ConnectionError with the specified error description. 
-func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { +func connectionErrorf(temp bool, e error, format string, a ...any) ConnectionError { return ConnectionError{ Desc: fmt.Sprintf(format, a...), temp: temp, diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/metadata/metadata.go b/.ci/providerlint/vendor/google.golang.org/grpc/metadata/metadata.go index a2cdcaf12a8..49446825763 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/metadata/metadata.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/metadata/metadata.go @@ -153,14 +153,16 @@ func Join(mds ...MD) MD { type mdIncomingKey struct{} type mdOutgoingKey struct{} -// NewIncomingContext creates a new context with incoming md attached. +// NewIncomingContext creates a new context with incoming md attached. md must +// not be modified after calling this function. func NewIncomingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdIncomingKey{}, md) } // NewOutgoingContext creates a new context with outgoing md attached. If used // in conjunction with AppendToOutgoingContext, NewOutgoingContext will -// overwrite any previously-appended metadata. +// overwrite any previously-appended metadata. md must not be modified after +// calling this function. func NewOutgoingContext(ctx context.Context, md MD) context.Context { return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) } @@ -203,7 +205,8 @@ func FromIncomingContext(ctx context.Context) (MD, bool) { } // ValueFromIncomingContext returns the metadata value corresponding to the metadata -// key from the incoming metadata if it exists. Key must be lower-case. +// key from the incoming metadata if it exists. Keys are matched in a case insensitive +// manner. 
// // # Experimental // @@ -219,17 +222,16 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string { return copyOf(v) } for k, v := range md { - // We need to manually convert all keys to lower case, because MD is a - // map, and there's no guarantee that the MD attached to the context is - // created using our helper functions. - if strings.ToLower(k) == key { + // Case insenitive comparison: MD is a map, and there's no guarantee + // that the MD attached to the context is created using our helper + // functions. + if strings.EqualFold(k, key) { return copyOf(v) } } return nil } -// the returned slice must not be modified in place func copyOf(v []string) []string { vals := make([]string, len(v)) copy(vals, v) diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/peer/peer.go b/.ci/providerlint/vendor/google.golang.org/grpc/peer/peer.go index e01d219ffbc..a821ff9b2b7 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/peer/peer.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/peer/peer.go @@ -32,6 +32,8 @@ import ( type Peer struct { // Addr is the peer address. Addr net.Addr + // LocalAddr is the local address. + LocalAddr net.Addr // AuthInfo is the authentication information of the transport. // It is nil if there is no transport security being used. AuthInfo credentials.AuthInfo diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/picker_wrapper.go b/.ci/providerlint/vendor/google.golang.org/grpc/picker_wrapper.go index 02f97595124..bf56faa76d3 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/picker_wrapper.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/picker_wrapper.go @@ -28,31 +28,31 @@ import ( "google.golang.org/grpc/internal/channelz" istatus "google.golang.org/grpc/internal/status" "google.golang.org/grpc/internal/transport" + "google.golang.org/grpc/stats" "google.golang.org/grpc/status" ) // pickerWrapper is a wrapper of balancer.Picker. 
It blocks on certain pick // actions and unblock when there's a picker update. type pickerWrapper struct { - mu sync.Mutex - done bool - idle bool - blockingCh chan struct{} - picker balancer.Picker + mu sync.Mutex + done bool + blockingCh chan struct{} + picker balancer.Picker + statsHandlers []stats.Handler // to record blocking picker calls } -func newPickerWrapper() *pickerWrapper { - return &pickerWrapper{blockingCh: make(chan struct{})} +func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper { + return &pickerWrapper{ + blockingCh: make(chan struct{}), + statsHandlers: statsHandlers, + } } // updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. func (pw *pickerWrapper) updatePicker(p balancer.Picker) { pw.mu.Lock() - if pw.done || pw.idle { - // There is a small window where a picker update from the LB policy can - // race with the channel going to idle mode. If the picker is idle here, - // it is because the channel asked it to do so, and therefore it is sage - // to ignore the update from the LB policy. + if pw.done { pw.mu.Unlock() return } @@ -95,6 +95,7 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. var ch chan struct{} var lastPickErr error + for { pw.mu.Lock() if pw.done { @@ -129,6 +130,20 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer. continue } + // If the channel is set, it means that the pick call had to wait for a + // new picker at some point. Either it's the first iteration and this + // function received the first picker, or a picker errored with + // ErrNoSubConnAvailable or errored with failfast set to false, which + // will trigger a continue to the next iteration. In the first case this + // conditional will hit if this call had to block (the channel is set). + // In the second case, the only way it will get to this conditional is + // if there is a new picker. 
+ if ch != nil { + for _, sh := range pw.statsHandlers { + sh.HandleRPC(ctx, &stats.PickerUpdated{}) + } + } + ch = pw.blockingCh p := pw.picker pw.mu.Unlock() @@ -190,23 +205,15 @@ func (pw *pickerWrapper) close() { close(pw.blockingCh) } -func (pw *pickerWrapper) enterIdleMode() { - pw.mu.Lock() - defer pw.mu.Unlock() - if pw.done { - return - } - pw.idle = true -} - -func (pw *pickerWrapper) exitIdleMode() { +// reset clears the pickerWrapper and prepares it for being used again when idle +// mode is exited. +func (pw *pickerWrapper) reset() { pw.mu.Lock() defer pw.mu.Unlock() if pw.done { return } pw.blockingCh = make(chan struct{}) - pw.idle = false } // dropError is a wrapper error that indicates the LB policy wishes to drop the diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/pickfirst.go b/.ci/providerlint/vendor/google.golang.org/grpc/pickfirst.go index abe266b021d..5128f9364dd 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/pickfirst.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/pickfirst.go @@ -25,13 +25,18 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/connectivity" - "google.golang.org/grpc/internal/envconfig" + internalgrpclog "google.golang.org/grpc/internal/grpclog" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" "google.golang.org/grpc/serviceconfig" ) -// PickFirstBalancerName is the name of the pick_first balancer. -const PickFirstBalancerName = "pick_first" +const ( + // PickFirstBalancerName is the name of the pick_first balancer. 
+ PickFirstBalancerName = "pick_first" + logPrefix = "[pick-first-lb %p] " +) func newPickfirstBuilder() balancer.Builder { return &pickfirstBuilder{} @@ -40,7 +45,9 @@ func newPickfirstBuilder() balancer.Builder { type pickfirstBuilder struct{} func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { - return &pickfirstBalancer{cc: cc} + b := &pickfirstBalancer{cc: cc} + b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b)) + return b } func (*pickfirstBuilder) Name() string { @@ -57,23 +64,23 @@ type pfConfig struct { } func (*pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) { - cfg := &pfConfig{} - if err := json.Unmarshal(js, cfg); err != nil { + var cfg pfConfig + if err := json.Unmarshal(js, &cfg); err != nil { return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err) } return cfg, nil } type pickfirstBalancer struct { + logger *internalgrpclog.PrefixLogger state connectivity.State cc balancer.ClientConn subConn balancer.SubConn - cfg *pfConfig } func (b *pickfirstBalancer) ResolverError(err error) { - if logger.V(2) { - logger.Infof("pickfirstBalancer: ResolverError called with error: %v", err) + if b.logger.V(2) { + b.logger.Infof("Received error from the name resolver: %v", err) } if b.subConn == nil { b.state = connectivity.TransientFailure @@ -96,35 +103,44 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState // The resolver reported an empty address list. Treat it like an error by // calling b.ResolverError. if b.subConn != nil { - // Remove the old subConn. All addresses were removed, so it is no longer - // valid. - b.cc.RemoveSubConn(b.subConn) + // Shut down the old subConn. All addresses were removed, so it is + // no longer valid. 
+ b.subConn.Shutdown() b.subConn = nil } b.ResolverError(errors.New("produced zero addresses")) return balancer.ErrBadResolverState } - if state.BalancerConfig != nil { - cfg, ok := state.BalancerConfig.(*pfConfig) - if !ok { - return fmt.Errorf("pickfirstBalancer: received nil or illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) - } - b.cfg = cfg + // We don't have to guard this block with the env var because ParseConfig + // already does so. + cfg, ok := state.BalancerConfig.(pfConfig) + if state.BalancerConfig != nil && !ok { + return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig) } - - if envconfig.PickFirstLBConfig && b.cfg != nil && b.cfg.ShuffleAddressList { + if cfg.ShuffleAddressList { + addrs = append([]resolver.Address{}, addrs...) grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] }) } + + if b.logger.V(2) { + b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState)) + } + if b.subConn != nil { b.cc.UpdateAddresses(b.subConn, addrs) return nil } - subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + var subConn balancer.SubConn + subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{ + StateListener: func(state balancer.SubConnState) { + b.updateSubConnState(subConn, state) + }, + }) if err != nil { - if logger.V(2) { - logger.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + if b.logger.V(2) { + b.logger.Infof("Failed to create new SubConn: %v", err) } b.state = connectivity.TransientFailure b.cc.UpdateState(balancer.State{ @@ -143,13 +159,19 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState return nil } +// UpdateSubConnState is unused as a StateListener is always registered when +// creating SubConns. 
func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { - if logger.V(2) { - logger.Infof("pickfirstBalancer: UpdateSubConnState: %p, %v", subConn, state) + b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state) +} + +func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) { + if b.logger.V(2) { + b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state) } if b.subConn != subConn { - if logger.V(2) { - logger.Infof("pickfirstBalancer: ignored state change because subConn is not recognized") + if b.logger.V(2) { + b.logger.Infof("Ignored state change because subConn is not recognized") } return } diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/preloader.go b/.ci/providerlint/vendor/google.golang.org/grpc/preloader.go index cd45547854f..73bd6336433 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/preloader.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/preloader.go @@ -37,7 +37,7 @@ type PreparedMsg struct { } // Encode marshalls and compresses the message using the codec and compressor for the stream. -func (p *PreparedMsg) Encode(s Stream, msg interface{}) error { +func (p *PreparedMsg) Encode(s Stream, msg any) error { ctx := s.Context() rpcInfo, ok := rpcInfoFromContext(ctx) if !ok { diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go b/.ci/providerlint/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go index ececdb89c97..6f5c786b211 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1/reflection.pb.go @@ -21,7 +21,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // source: grpc/reflection/v1/reflection.proto diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go b/.ci/providerlint/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go index d54c07676d5..69fbfb621ec 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/reflection/grpc_reflection_v1alpha/reflection.pb.go @@ -18,7 +18,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 +// protoc-gen-go v1.31.0 // protoc v4.22.0 // grpc/reflection/v1alpha/reflection.proto is a deprecated file. diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/.ci/providerlint/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go new file mode 100644 index 00000000000..14aa6f20ae0 --- /dev/null +++ b/.ci/providerlint/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -0,0 +1,36 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +// +// Deprecated: this package is imported by grpc and should not need to be +// imported directly by users. 
+package dns + +import ( + "google.golang.org/grpc/internal/resolver/dns" + "google.golang.org/grpc/resolver" +) + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +// +// Deprecated: import grpc and use resolver.Get("dns") instead. +func NewBuilder() resolver.Builder { + return dns.NewBuilder() +} diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/resolver/map.go b/.ci/providerlint/vendor/google.golang.org/grpc/resolver/map.go index efcb7f3efd8..ada5b9bb79b 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/resolver/map.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/resolver/map.go @@ -20,7 +20,7 @@ package resolver type addressMapEntry struct { addr Address - value interface{} + value any } // AddressMap is a map of addresses to arbitrary values taking into account @@ -69,7 +69,7 @@ func (l addressMapEntryList) find(addr Address) int { } // Get returns the value for the address in the map, if present. -func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { +func (a *AddressMap) Get(addr Address) (value any, ok bool) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { @@ -79,7 +79,7 @@ func (a *AddressMap) Get(addr Address) (value interface{}, ok bool) { } // Set updates or adds the value to the address in the map. -func (a *AddressMap) Set(addr Address, value interface{}) { +func (a *AddressMap) Set(addr Address, value any) { addrKey := toMapKey(&addr) entryList := a.m[addrKey] if entry := entryList.find(addr); entry != -1 { @@ -127,8 +127,8 @@ func (a *AddressMap) Keys() []Address { } // Values returns a slice of all current map values. 
-func (a *AddressMap) Values() []interface{} { - ret := make([]interface{}, 0, a.Len()) +func (a *AddressMap) Values() []any { + ret := make([]any, 0, a.Len()) for _, entryList := range a.m { for _, entry := range entryList { ret = append(ret, entry.value) @@ -136,3 +136,116 @@ func (a *AddressMap) Values() []interface{} { } return ret } + +type endpointNode struct { + addrs map[string]struct{} +} + +// Equal returns whether the unordered set of addrs are the same between the +// endpoint nodes. +func (en *endpointNode) Equal(en2 *endpointNode) bool { + if len(en.addrs) != len(en2.addrs) { + return false + } + for addr := range en.addrs { + if _, ok := en2.addrs[addr]; !ok { + return false + } + } + return true +} + +func toEndpointNode(endpoint Endpoint) endpointNode { + en := make(map[string]struct{}) + for _, addr := range endpoint.Addresses { + en[addr.Addr] = struct{}{} + } + return endpointNode{ + addrs: en, + } +} + +// EndpointMap is a map of endpoints to arbitrary values keyed on only the +// unordered set of address strings within an endpoint. This map is not thread +// safe, thus it is unsafe to access concurrently. Must be created via +// NewEndpointMap; do not construct directly. +type EndpointMap struct { + endpoints map[*endpointNode]any +} + +// NewEndpointMap creates a new EndpointMap. +func NewEndpointMap() *EndpointMap { + return &EndpointMap{ + endpoints: make(map[*endpointNode]any), + } +} + +// Get returns the value for the address in the map, if present. +func (em *EndpointMap) Get(e Endpoint) (value any, ok bool) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + return em.endpoints[endpoint], true + } + return nil, false +} + +// Set updates or adds the value to the address in the map. 
+func (em *EndpointMap) Set(e Endpoint, value any) { + en := toEndpointNode(e) + if endpoint := em.find(en); endpoint != nil { + em.endpoints[endpoint] = value + return + } + em.endpoints[&en] = value +} + +// Len returns the number of entries in the map. +func (em *EndpointMap) Len() int { + return len(em.endpoints) +} + +// Keys returns a slice of all current map keys, as endpoints specifying the +// addresses present in the endpoint keys, in which uniqueness is determined by +// the unordered set of addresses. Thus, endpoint information returned is not +// the full endpoint data (drops duplicated addresses and attributes) but can be +// used for EndpointMap accesses. +func (em *EndpointMap) Keys() []Endpoint { + ret := make([]Endpoint, 0, len(em.endpoints)) + for en := range em.endpoints { + var endpoint Endpoint + for addr := range en.addrs { + endpoint.Addresses = append(endpoint.Addresses, Address{Addr: addr}) + } + ret = append(ret, endpoint) + } + return ret +} + +// Values returns a slice of all current map values. +func (em *EndpointMap) Values() []any { + ret := make([]any, 0, len(em.endpoints)) + for _, val := range em.endpoints { + ret = append(ret, val) + } + return ret +} + +// find returns a pointer to the endpoint node in em if the endpoint node is +// already present. If not found, nil is returned. The comparisons are done on +// the unordered set of addresses within an endpoint. +func (em EndpointMap) find(e endpointNode) *endpointNode { + for endpoint := range em.endpoints { + if e.Equal(endpoint) { + return endpoint + } + } + return nil +} + +// Delete removes the specified endpoint from the map. 
+func (em *EndpointMap) Delete(e Endpoint) { + en := toEndpointNode(e) + if entry := em.find(en); entry != nil { + delete(em.endpoints, entry) + } +} diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/resolver/resolver.go b/.ci/providerlint/vendor/google.golang.org/grpc/resolver/resolver.go index d8db6f5d34e..bd1c7d01b7e 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/resolver/resolver.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/resolver/resolver.go @@ -77,25 +77,6 @@ func GetDefaultScheme() string { return defaultScheme } -// AddressType indicates the address type returned by name resolution. -// -// Deprecated: use Attributes in Address instead. -type AddressType uint8 - -const ( - // Backend indicates the address is for a backend server. - // - // Deprecated: use Attributes in Address instead. - Backend AddressType = iota - // GRPCLB indicates the address is for a grpclb load balancer. - // - // Deprecated: to select the GRPCLB load balancing policy, use a service - // config with a corresponding loadBalancingConfig. To supply balancer - // addresses to the GRPCLB load balancing policy, set State.Attributes - // using balancer/grpclb/state.Set. - GRPCLB -) - // Address represents a server the client connects to. // // # Experimental @@ -111,9 +92,6 @@ type Address struct { // the address, instead of the hostname from the Dial target string. In most cases, // this should not be set. // - // If Type is GRPCLB, ServerName should be the name of the remote load - // balancer, not the name of the backend. - // // WARNING: ServerName must only be populated with trusted values. It // is insecure to populate it with data from untrusted inputs since untrusted // values could be used to bypass the authority checks performed by TLS. @@ -126,18 +104,16 @@ type Address struct { // BalancerAttributes contains arbitrary data about this address intended // for consumption by the LB policy. 
These attributes do not affect SubConn // creation, connection establishment, handshaking, etc. - BalancerAttributes *attributes.Attributes - - // Type is the type of this address. // - // Deprecated: use Attributes instead. - Type AddressType + // Deprecated: when an Address is inside an Endpoint, this field should not + // be used, and it will eventually be removed entirely. + BalancerAttributes *attributes.Attributes // Metadata is the information associated with Addr, which may be used // to make load balancing decision. // // Deprecated: use Attributes instead. - Metadata interface{} + Metadata any } // Equal returns whether a and o are identical. Metadata is compared directly, @@ -150,7 +126,7 @@ func (a Address) Equal(o Address) bool { return a.Addr == o.Addr && a.ServerName == o.ServerName && a.Attributes.Equal(o.Attributes) && a.BalancerAttributes.Equal(o.BalancerAttributes) && - a.Type == o.Type && a.Metadata == o.Metadata + a.Metadata == o.Metadata } // String returns JSON formatted string representation of the address. @@ -194,11 +170,37 @@ type BuildOptions struct { Dialer func(context.Context, string) (net.Conn, error) } +// An Endpoint is one network endpoint, or server, which may have multiple +// addresses with which it can be accessed. +type Endpoint struct { + // Addresses contains a list of addresses used to access this endpoint. + Addresses []Address + + // Attributes contains arbitrary data about this endpoint intended for + // consumption by the LB policy. + Attributes *attributes.Attributes +} + // State contains the current Resolver state relevant to the ClientConn. type State struct { // Addresses is the latest set of resolved addresses for the target. + // + // If a resolver sets Addresses but does not set Endpoints, one Endpoint + // will be created for each Address before the State is passed to the LB + // policy. 
The BalancerAttributes of each entry in Addresses will be set + // in Endpoints.Attributes, and be cleared in the Endpoint's Address's + // BalancerAttributes. + // + // Soon, Addresses will be deprecated and replaced fully by Endpoints. Addresses []Address + // Endpoints is the latest set of resolved endpoints for the target. + // + // If a resolver produces a State containing Endpoints but not Addresses, + // it must take care to ensure the LB policies it selects will support + // Endpoints. + Endpoints []Endpoint + // ServiceConfig contains the result from parsing the latest service // config. If it is nil, it indicates no service config is present or the // resolver does not provide service configs. @@ -238,11 +240,6 @@ type ClientConn interface { // // Deprecated: Use UpdateState instead. NewAddress(addresses []Address) - // NewServiceConfig is called by resolver to notify ClientConn a new - // service config. The service config should be provided as a json string. - // - // Deprecated: Use UpdateState instead. - NewServiceConfig(serviceConfig string) // ParseServiceConfig parses the provided service config and returns an // object that provides the parsed config. ParseServiceConfig(serviceConfigJSON string) *serviceconfig.ParseResult @@ -258,15 +255,6 @@ type ClientConn interface { // target does not contain a scheme or if the parsed scheme is not registered // (i.e. no corresponding resolver available to resolve the endpoint), we will // apply the default scheme, and will attempt to reparse it. 
-// -// Examples: -// -// - "dns://some_authority/foo.bar" -// Target{Scheme: "dns", Authority: "some_authority", Endpoint: "foo.bar"} -// - "foo.bar" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "foo.bar"} -// - "unknown_scheme://authority/endpoint" -// Target{Scheme: resolver.GetDefaultScheme(), Endpoint: "unknown_scheme://authority/endpoint"} type Target struct { // URL contains the parsed dial target with an optional default scheme added // to it if the original dial target contained no scheme or contained an @@ -293,6 +281,11 @@ func (t Target) Endpoint() string { return strings.TrimPrefix(endpoint, "/") } +// String returns a string representation of Target. +func (t Target) String() string { + return t.URL.String() +} + // Builder creates a resolver that will be used to watch name resolution updates. type Builder interface { // Build creates a new resolver for the given target. @@ -321,10 +314,3 @@ type Resolver interface { // Close closes the resolver. Close() } - -// UnregisterForTesting removes the resolver builder with the given scheme from the -// resolver map. -// This function is for testing only. -func UnregisterForTesting(scheme string) { - delete(m, scheme) -} diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/.ci/providerlint/vendor/google.golang.org/grpc/resolver_conn_wrapper.go deleted file mode 100644 index b408b3688f2..00000000000 --- a/.ci/providerlint/vendor/google.golang.org/grpc/resolver_conn_wrapper.go +++ /dev/null @@ -1,239 +0,0 @@ -/* - * - * Copyright 2017 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package grpc - -import ( - "context" - "strings" - "sync" - - "google.golang.org/grpc/balancer" - "google.golang.org/grpc/internal/channelz" - "google.golang.org/grpc/internal/grpcsync" - "google.golang.org/grpc/internal/pretty" - "google.golang.org/grpc/resolver" - "google.golang.org/grpc/serviceconfig" -) - -// resolverStateUpdater wraps the single method used by ccResolverWrapper to -// report a state update from the actual resolver implementation. -type resolverStateUpdater interface { - updateResolverState(s resolver.State, err error) error -} - -// ccResolverWrapper is a wrapper on top of cc for resolvers. -// It implements resolver.ClientConn interface. -type ccResolverWrapper struct { - // The following fields are initialized when the wrapper is created and are - // read-only afterwards, and therefore can be accessed without a mutex. - cc resolverStateUpdater - channelzID *channelz.Identifier - ignoreServiceConfig bool - opts ccResolverWrapperOpts - serializer *grpcsync.CallbackSerializer // To serialize all incoming calls. - serializerCancel context.CancelFunc // To close the serializer, accessed only from close(). - - // All incoming (resolver --> gRPC) calls are guaranteed to execute in a - // mutually exclusive manner as they are scheduled on the serializer. - // Fields accessed *only* in these serializer callbacks, can therefore be - // accessed without a mutex. - curState resolver.State - - // mu guards access to the below fields. 
- mu sync.Mutex - closed bool - resolver resolver.Resolver // Accessed only from outgoing calls. -} - -// ccResolverWrapperOpts wraps the arguments to be passed when creating a new -// ccResolverWrapper. -type ccResolverWrapperOpts struct { - target resolver.Target // User specified dial target to resolve. - builder resolver.Builder // Resolver builder to use. - bOpts resolver.BuildOptions // Resolver build options to use. - channelzID *channelz.Identifier // Channelz identifier for the channel. -} - -// newCCResolverWrapper uses the resolver.Builder to build a Resolver and -// returns a ccResolverWrapper object which wraps the newly built resolver. -func newCCResolverWrapper(cc resolverStateUpdater, opts ccResolverWrapperOpts) (*ccResolverWrapper, error) { - ctx, cancel := context.WithCancel(context.Background()) - ccr := &ccResolverWrapper{ - cc: cc, - channelzID: opts.channelzID, - ignoreServiceConfig: opts.bOpts.DisableServiceConfig, - opts: opts, - serializer: grpcsync.NewCallbackSerializer(ctx), - serializerCancel: cancel, - } - - // Cannot hold the lock at build time because the resolver can send an - // update or error inline and these incoming calls grab the lock to schedule - // a callback in the serializer. - r, err := opts.builder.Build(opts.target, ccr, opts.bOpts) - if err != nil { - cancel() - return nil, err - } - - // Any error reported by the resolver at build time that leads to a - // re-resolution request from the balancer is dropped by grpc until we - // return from this function. So, we don't have to handle pending resolveNow - // requests here. - ccr.mu.Lock() - ccr.resolver = r - ccr.mu.Unlock() - - return ccr, nil -} - -func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { - ccr.mu.Lock() - defer ccr.mu.Unlock() - - // ccr.resolver field is set only after the call to Build() returns. 
But in - // the process of building, the resolver may send an error update which when - // propagated to the balancer may result in a re-resolution request. - if ccr.closed || ccr.resolver == nil { - return - } - ccr.resolver.ResolveNow(o) -} - -func (ccr *ccResolverWrapper) close() { - ccr.mu.Lock() - if ccr.closed { - ccr.mu.Unlock() - return - } - - channelz.Info(logger, ccr.channelzID, "Closing the name resolver") - - // Close the serializer to ensure that no more calls from the resolver are - // handled, before actually closing the resolver. - ccr.serializerCancel() - ccr.closed = true - r := ccr.resolver - ccr.mu.Unlock() - - // Give enqueued callbacks a chance to finish. - <-ccr.serializer.Done - - // Spawn a goroutine to close the resolver (since it may block trying to - // cleanup all allocated resources) and return early. - go r.Close() -} - -// serializerScheduleLocked is a convenience method to schedule a function to be -// run on the serializer while holding ccr.mu. -func (ccr *ccResolverWrapper) serializerScheduleLocked(f func(context.Context)) { - ccr.mu.Lock() - ccr.serializer.Schedule(f) - ccr.mu.Unlock() -} - -// UpdateState is called by resolver implementations to report new state to gRPC -// which includes addresses and service config. -func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { - errCh := make(chan error, 1) - ok := ccr.serializer.Schedule(func(context.Context) { - ccr.addChannelzTraceEvent(s) - ccr.curState = s - if err := ccr.cc.updateResolverState(ccr.curState, nil); err == balancer.ErrBadResolverState { - errCh <- balancer.ErrBadResolverState - return - } - errCh <- nil - }) - if !ok { - // The only time when Schedule() fail to add the callback to the - // serializer is when the serializer is closed, and this happens only - // when the resolver wrapper is closed. 
- return nil - } - return <-errCh -} - -// ReportError is called by resolver implementations to report errors -// encountered during name resolution to gRPC. -func (ccr *ccResolverWrapper) ReportError(err error) { - ccr.serializerScheduleLocked(func(_ context.Context) { - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) - ccr.cc.updateResolverState(resolver.State{}, err) - }) -} - -// NewAddress is called by the resolver implementation to send addresses to -// gRPC. -func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { - ccr.serializerScheduleLocked(func(_ context.Context) { - ccr.addChannelzTraceEvent(resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig}) - ccr.curState.Addresses = addrs - ccr.cc.updateResolverState(ccr.curState, nil) - }) -} - -// NewServiceConfig is called by the resolver implementation to send service -// configs to gRPC. -func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { - ccr.serializerScheduleLocked(func(_ context.Context) { - channelz.Infof(logger, ccr.channelzID, "ccResolverWrapper: got new service config: %s", sc) - if ccr.ignoreServiceConfig { - channelz.Info(logger, ccr.channelzID, "Service config lookups disabled; ignoring config") - return - } - scpr := parseServiceConfig(sc) - if scpr.Err != nil { - channelz.Warningf(logger, ccr.channelzID, "ccResolverWrapper: error parsing service config: %v", scpr.Err) - return - } - ccr.addChannelzTraceEvent(resolver.State{Addresses: ccr.curState.Addresses, ServiceConfig: scpr}) - ccr.curState.ServiceConfig = scpr - ccr.cc.updateResolverState(ccr.curState, nil) - }) -} - -// ParseServiceConfig is called by resolver implementations to parse a JSON -// representation of the service config. 
-func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { - return parseServiceConfig(scJSON) -} - -// addChannelzTraceEvent adds a channelz trace event containing the new -// state received from resolver implementations. -func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { - var updates []string - var oldSC, newSC *ServiceConfig - var oldOK, newOK bool - if ccr.curState.ServiceConfig != nil { - oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) - } - if s.ServiceConfig != nil { - newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) - } - if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { - updates = append(updates, "service config updated") - } - if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { - updates = append(updates, "resolver returned an empty address list") - } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { - updates = append(updates, "resolver returned new addresses") - } - channelz.Infof(logger, ccr.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) -} diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/resolver_wrapper.go b/.ci/providerlint/vendor/google.golang.org/grpc/resolver_wrapper.go new file mode 100644 index 00000000000..c79bab12149 --- /dev/null +++ b/.ci/providerlint/vendor/google.golang.org/grpc/resolver_wrapper.go @@ -0,0 +1,197 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "strings" + "sync" + + "google.golang.org/grpc/internal/channelz" + "google.golang.org/grpc/internal/grpcsync" + "google.golang.org/grpc/internal/pretty" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/serviceconfig" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConn interface. +type ccResolverWrapper struct { + // The following fields are initialized when the wrapper is created and are + // read-only afterwards, and therefore can be accessed without a mutex. + cc *ClientConn + ignoreServiceConfig bool + serializer *grpcsync.CallbackSerializer + serializerCancel context.CancelFunc + + resolver resolver.Resolver // only accessed within the serializer + + // The following fields are protected by mu. Caller must take cc.mu before + // taking mu. + mu sync.Mutex + curState resolver.State + closed bool +} + +// newCCResolverWrapper initializes the ccResolverWrapper. It can only be used +// after calling start, which builds the resolver. +func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper { + ctx, cancel := context.WithCancel(cc.ctx) + return &ccResolverWrapper{ + cc: cc, + ignoreServiceConfig: cc.dopts.disableServiceConfig, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + } +} + +// start builds the name resolver using the resolver.Builder in cc and returns +// any error encountered. It must always be the first operation performed on +// any newly created ccResolverWrapper, except that close may be called instead. 
+func (ccr *ccResolverWrapper) start() error { + errCh := make(chan error) + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil { + return + } + opts := resolver.BuildOptions{ + DisableServiceConfig: ccr.cc.dopts.disableServiceConfig, + DialCreds: ccr.cc.dopts.copts.TransportCredentials, + CredsBundle: ccr.cc.dopts.copts.CredsBundle, + Dialer: ccr.cc.dopts.copts.Dialer, + } + var err error + ccr.resolver, err = ccr.cc.resolverBuilder.Build(ccr.cc.parsedTarget, ccr, opts) + errCh <- err + }) + return <-errCh +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) { + ccr.serializer.Schedule(func(ctx context.Context) { + if ctx.Err() != nil || ccr.resolver == nil { + return + } + ccr.resolver.ResolveNow(o) + }) +} + +// close initiates async shutdown of the wrapper. To determine the wrapper has +// finished shutting down, the channel should block on ccr.serializer.Done() +// without cc.mu held. +func (ccr *ccResolverWrapper) close() { + channelz.Info(logger, ccr.cc.channelzID, "Closing the name resolver") + ccr.mu.Lock() + ccr.closed = true + ccr.mu.Unlock() + + ccr.serializer.Schedule(func(context.Context) { + if ccr.resolver == nil { + return + } + ccr.resolver.Close() + ccr.resolver = nil + }) + ccr.serializerCancel() +} + +// UpdateState is called by resolver implementations to report new state to gRPC +// which includes addresses and service config. 
+func (ccr *ccResolverWrapper) UpdateState(s resolver.State) error { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return nil + } + if s.Endpoints == nil { + s.Endpoints = make([]resolver.Endpoint, 0, len(s.Addresses)) + for _, a := range s.Addresses { + ep := resolver.Endpoint{Addresses: []resolver.Address{a}, Attributes: a.BalancerAttributes} + ep.Addresses[0].BalancerAttributes = nil + s.Endpoints = append(s.Endpoints, ep) + } + } + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + return ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ReportError is called by resolver implementations to report errors +// encountered during name resolution to gRPC. +func (ccr *ccResolverWrapper) ReportError(err error) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + ccr.mu.Unlock() + channelz.Warningf(logger, ccr.cc.channelzID, "ccResolverWrapper: reporting error to cc: %v", err) + ccr.cc.updateResolverStateAndUnlock(resolver.State{}, err) +} + +// NewAddress is called by the resolver implementation to send addresses to +// gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + ccr.cc.mu.Lock() + ccr.mu.Lock() + if ccr.closed { + ccr.mu.Unlock() + ccr.cc.mu.Unlock() + return + } + s := resolver.State{Addresses: addrs, ServiceConfig: ccr.curState.ServiceConfig} + ccr.addChannelzTraceEvent(s) + ccr.curState = s + ccr.mu.Unlock() + ccr.cc.updateResolverStateAndUnlock(s, nil) +} + +// ParseServiceConfig is called by resolver implementations to parse a JSON +// representation of the service config. +func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult { + return parseServiceConfig(scJSON) +} + +// addChannelzTraceEvent adds a channelz trace event containing the new +// state received from resolver implementations. 
+func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) { + var updates []string + var oldSC, newSC *ServiceConfig + var oldOK, newOK bool + if ccr.curState.ServiceConfig != nil { + oldSC, oldOK = ccr.curState.ServiceConfig.Config.(*ServiceConfig) + } + if s.ServiceConfig != nil { + newSC, newOK = s.ServiceConfig.Config.(*ServiceConfig) + } + if oldOK != newOK || (oldOK && newOK && oldSC.rawJSONString != newSC.rawJSONString) { + updates = append(updates, "service config updated") + } + if len(ccr.curState.Addresses) > 0 && len(s.Addresses) == 0 { + updates = append(updates, "resolver returned an empty address list") + } else if len(ccr.curState.Addresses) == 0 && len(s.Addresses) > 0 { + updates = append(updates, "resolver returned new addresses") + } + channelz.Infof(logger, ccr.cc.channelzID, "Resolver state updated: %s (%v)", pretty.ToJSON(s), strings.Join(updates, "; ")) +} diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/rpc_util.go b/.ci/providerlint/vendor/google.golang.org/grpc/rpc_util.go index a844d28f49d..b7723aa09cb 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/rpc_util.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/rpc_util.go @@ -75,7 +75,7 @@ func NewGZIPCompressorWithLevel(level int) (Compressor, error) { } return &gzipCompressor{ pool: sync.Pool{ - New: func() interface{} { + New: func() any { w, err := gzip.NewWriterLevel(io.Discard, level) if err != nil { panic(err) @@ -626,7 +626,7 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt // encode serializes msg and returns a buffer containing the message, or an // error if it is too large to be transmitted by grpc. If msg is nil, it // generates an empty message. 
-func encode(c baseCodec, msg interface{}) ([]byte, error) { +func encode(c baseCodec, msg any) ([]byte, error) { if msg == nil { // NOTE: typed nils will not be caught by this check return nil, nil } @@ -693,7 +693,7 @@ func msgHeader(data, compData []byte) (hdr []byte, payload []byte) { return hdr, data } -func outPayload(client bool, msg interface{}, data, payload []byte, t time.Time) *stats.OutPayload { +func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload { return &stats.OutPayload{ Client: client, Payload: msg, @@ -792,7 +792,7 @@ func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize // For the two compressor parameters, both should not be set, but if they are, // dc takes precedence over compressor. // TODO(dfawley): wrap the old compressor/decompressor using the new API? -func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error { buf, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor) if err != nil { return err @@ -863,19 +863,22 @@ func ErrorDesc(err error) string { // Errorf returns nil if c is OK. // // Deprecated: use status.Errorf instead. -func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return status.Errorf(c, format, a...) } +var errContextCanceled = status.Error(codes.Canceled, context.Canceled.Error()) +var errContextDeadline = status.Error(codes.DeadlineExceeded, context.DeadlineExceeded.Error()) + // toRPCErr converts an error into an error from the status package. 
func toRPCErr(err error) error { switch err { case nil, io.EOF: return err case context.DeadlineExceeded: - return status.Error(codes.DeadlineExceeded, err.Error()) + return errContextDeadline case context.Canceled: - return status.Error(codes.Canceled, err.Error()) + return errContextCanceled case io.ErrUnexpectedEOF: return status.Error(codes.Internal, err.Error()) } diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/server.go b/.ci/providerlint/vendor/google.golang.org/grpc/server.go index b44c7e86c3b..2fa694d555e 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/server.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/server.go @@ -70,6 +70,10 @@ func init() { internal.GetServerCredentials = func(srv *Server) credentials.TransportCredentials { return srv.opts.creds } + internal.IsRegisteredMethod = func(srv *Server, method string) bool { + return srv.isRegisteredMethod(method) + } + internal.ServerFromContext = serverFromContext internal.DrainServerTransports = func(srv *Server, addr string) { srv.drainServerTransports(addr) } @@ -81,12 +85,13 @@ func init() { } internal.BinaryLogger = binaryLogger internal.JoinServerOptions = newJoinServerOption + internal.RecvBufferPool = recvBufferPool } var statusOK = status.New(codes.OK, "") var logger = grpclog.Component("core") -type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) +type methodHandler func(srv any, ctx context.Context, dec func(any) error, interceptor UnaryServerInterceptor) (any, error) // MethodDesc represents an RPC service's method specification. type MethodDesc struct { @@ -99,20 +104,20 @@ type ServiceDesc struct { ServiceName string // The pointer to the service interface. Used to check whether the user // provided implementation satisfies the interface requirements. 
- HandlerType interface{} + HandlerType any Methods []MethodDesc Streams []StreamDesc - Metadata interface{} + Metadata any } // serviceInfo wraps information about a service. It is very similar to // ServiceDesc and is constructed from it for internal purposes. type serviceInfo struct { // Contains the implementation for the methods in this service. - serviceImpl interface{} + serviceImpl any methods map[string]*MethodDesc streams map[string]*StreamDesc - mdata interface{} + mdata any } // Server is a gRPC server to serve RPC requests. @@ -164,6 +169,7 @@ type serverOptions struct { initialConnWindowSize int32 writeBufferSize int readBufferSize int + sharedWriteBuffer bool connectionTimeout time.Duration maxHeaderListSize *uint32 headerTableSize *uint32 @@ -230,6 +236,20 @@ func newJoinServerOption(opts ...ServerOption) ServerOption { return &joinServerOption{opts: opts} } +// SharedWriteBuffer allows reusing per-connection transport write buffer. +// If this option is set to true every connection will release the buffer after +// flushing the data on the wire. +// +// # Experimental +// +// Notice: This API is EXPERIMENTAL and may be changed or removed in a +// later release. +func SharedWriteBuffer(val bool) ServerOption { + return newFuncServerOption(func(o *serverOptions) { + o.sharedWriteBuffer = val + }) +} + // WriteBufferSize determines how much data can be batched before doing a write // on the wire. The corresponding memory allocation for this buffer will be // twice the size to keep syscalls low. The default value for this buffer is @@ -270,9 +290,9 @@ func InitialConnWindowSize(s int32) ServerOption { // KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. 
func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { - if kp.Time > 0 && kp.Time < time.Second { + if kp.Time > 0 && kp.Time < internal.KeepaliveMinServerPingTime { logger.Warning("Adjusting keepalive ping interval to minimum period of 1s") - kp.Time = time.Second + kp.Time = internal.KeepaliveMinServerPingTime } return newFuncServerOption(func(o *serverOptions) { @@ -563,11 +583,13 @@ func NumStreamWorkers(numServerWorkers uint32) ServerOption { // options are used: StatsHandler, EnableTracing, or binary logging. In such // cases, the shared buffer pool will be ignored. // -// # Experimental -// -// Notice: This API is EXPERIMENTAL and may be changed or removed in a -// later release. +// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in +// v1.60.0 or later. func RecvBufferPool(bufferPool SharedBufferPool) ServerOption { + return recvBufferPool(bufferPool) +} + +func recvBufferPool(bufferPool SharedBufferPool) ServerOption { return newFuncServerOption(func(o *serverOptions) { o.recvBufferPool = bufferPool }) @@ -648,7 +670,7 @@ func NewServer(opt ...ServerOption) *Server { // printf records an event in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) printf(format string, a ...interface{}) { +func (s *Server) printf(format string, a ...any) { if s.events != nil { s.events.Printf(format, a...) } @@ -656,7 +678,7 @@ func (s *Server) printf(format string, a ...interface{}) { // errorf records an error in s's event log, unless s has been stopped. // REQUIRES s.mu is held. -func (s *Server) errorf(format string, a ...interface{}) { +func (s *Server) errorf(format string, a ...any) { if s.events != nil { s.events.Errorf(format, a...) } @@ -671,14 +693,14 @@ type ServiceRegistrar interface { // once the server has started serving. // desc describes the service and its methods and handlers. impl is the // service implementation which is passed to the method handlers. 
- RegisterService(desc *ServiceDesc, impl interface{}) + RegisterService(desc *ServiceDesc, impl any) } // RegisterService registers a service and its implementation to the gRPC // server. It is called from the IDL generated code. This must be called before // invoking Serve. If ss is non-nil (for legacy code), its type is checked to // ensure it implements sd.HandlerType. -func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { +func (s *Server) RegisterService(sd *ServiceDesc, ss any) { if ss != nil { ht := reflect.TypeOf(sd.HandlerType).Elem() st := reflect.TypeOf(ss) @@ -689,7 +711,7 @@ func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { s.register(sd, ss) } -func (s *Server) register(sd *ServiceDesc, ss interface{}) { +func (s *Server) register(sd *ServiceDesc, ss any) { s.mu.Lock() defer s.mu.Unlock() s.printf("RegisterService(%q)", sd.ServiceName) @@ -730,7 +752,7 @@ type MethodInfo struct { type ServiceInfo struct { Methods []MethodInfo // Metadata is the metadata specified in ServiceDesc when registering service. - Metadata interface{} + Metadata any } // GetServiceInfo returns a map from service names to ServiceInfo. @@ -791,6 +813,18 @@ func (l *listenSocket) Close() error { // Serve returns when lis.Accept fails with fatal errors. lis will be closed when // this method returns. // Serve will return a non-nil error unless Stop or GracefulStop is called. +// +// Note: All supported releases of Go (as of December 2023) override the OS +// defaults for TCP keepalive time and interval to 15s. To enable TCP keepalive +// with OS defaults for keepalive time and interval, callers need to do the +// following two things: +// - pass a net.Listener created by calling the Listen method on a +// net.ListenConfig with the `KeepAlive` field set to a negative value. This +// will result in the Go standard library not overriding OS defaults for TCP +// keepalive interval and time. 
But this will also result in the Go standard +// library not enabling TCP keepalives by default. +// - override the Accept method on the passed in net.Listener and set the +// SO_KEEPALIVE socket option to enable TCP keepalives, with OS defaults. func (s *Server) Serve(lis net.Listener) error { s.mu.Lock() s.printf("serving") @@ -902,7 +936,7 @@ func (s *Server) handleRawConn(lisAddr string, rawConn net.Conn) { return } go func() { - s.serveStreams(st) + s.serveStreams(context.Background(), st, rawConn) s.removeConn(lisAddr, st) }() } @@ -931,6 +965,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { InitialConnWindowSize: s.opts.initialConnWindowSize, WriteBufferSize: s.opts.writeBufferSize, ReadBufferSize: s.opts.readBufferSize, + SharedWriteBuffer: s.opts.sharedWriteBuffer, ChannelzParentID: s.channelzID, MaxHeaderListSize: s.opts.maxHeaderListSize, HeaderTableSize: s.opts.headerTableSize, @@ -955,19 +990,30 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport { return st } -func (s *Server) serveStreams(st transport.ServerTransport) { - defer st.Close(errors.New("finished serving streams for the server transport")) - var wg sync.WaitGroup +func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) { + ctx = transport.SetConnection(ctx, rawConn) + ctx = peer.NewContext(ctx, st.Peer()) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagConn(ctx, &stats.ConnTagInfo{ + RemoteAddr: st.Peer().Addr, + LocalAddr: st.Peer().LocalAddr, + }) + sh.HandleConn(ctx, &stats.ConnBegin{}) + } - streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) - st.HandleStreams(func(stream *transport.Stream) { - wg.Add(1) + defer func() { + st.Close(errors.New("finished serving streams for the server transport")) + for _, sh := range s.opts.statsHandlers { + sh.HandleConn(ctx, &stats.ConnEnd{}) + } + }() + streamQuota := newHandlerQuota(s.opts.maxConcurrentStreams) + st.HandleStreams(ctx, 
func(stream *transport.Stream) { streamQuota.acquire() f := func() { defer streamQuota.release() - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) + s.handleStream(st, stream) } if s.opts.numServerWorkers > 0 { @@ -979,14 +1025,7 @@ func (s *Server) serveStreams(st transport.ServerTransport) { } } go f() - }, func(ctx context.Context, method string) context.Context { - if !EnableTracing { - return ctx - } - tr := trace.New("grpc.Recv."+methodFamily(method), method) - return trace.NewContext(ctx, tr) }) - wg.Wait() } var _ http.Handler = (*Server)(nil) @@ -1030,31 +1069,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } defer s.removeConn(listenerAddressForServeHTTP, st) - s.serveStreams(st) -} - -// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. -// If tracing is not enabled, it returns nil. -func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { - if !EnableTracing { - return nil - } - tr, ok := trace.FromContext(stream.Context()) - if !ok { - return nil - } - - trInfo = &traceInfo{ - tr: tr, - firstLine: firstLine{ - client: false, - remoteAddr: st.RemoteAddr(), - }, - } - if dl, ok := stream.Context().Deadline(); ok { - trInfo.firstLine.deadline = time.Until(dl) - } - return trInfo + s.serveStreams(r.Context(), st, nil) } func (s *Server) addConn(addr string, st transport.ServerTransport) bool { @@ -1117,7 +1132,7 @@ func (s *Server) incrCallsFailed() { atomic.AddInt64(&s.czData.callsFailed, 1) } -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err 
!= nil { channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) @@ -1136,7 +1151,7 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str err = t.Write(stream, hdr, payload, opts) if err == nil { for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) } } return err @@ -1164,7 +1179,7 @@ func chainUnaryServerInterceptors(s *Server) { } func chainUnaryInterceptors(interceptors []UnaryServerInterceptor) UnaryServerInterceptor { - return func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (interface{}, error) { + return func(ctx context.Context, req any, info *UnaryServerInfo, handler UnaryHandler) (any, error) { return interceptors[0](ctx, req, info, getChainUnaryHandler(interceptors, 0, info, handler)) } } @@ -1173,12 +1188,12 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info if curr == len(interceptors)-1 { return finalHandler } - return func(ctx context.Context, req interface{}) (interface{}, error) { + return func(ctx context.Context, req any) (any, error) { return interceptors[curr+1](ctx, req, info, getChainUnaryHandler(interceptors, curr+1, info, finalHandler)) } } -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1192,7 +1207,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
IsClientStream: false, IsServerStream: false, } - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) @@ -1210,7 +1225,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. defer func() { if trInfo != nil { if err != nil && err != io.EOF { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) trInfo.tr.SetError() } trInfo.tr.Finish() @@ -1224,7 +1239,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } if channelz.IsOn() { @@ -1246,7 +1261,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } } if len(binlogs) != 0 { - ctx := stream.Context() md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1327,12 +1341,12 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if channelz.IsOn() { t.IncrMsgRecv() } - df := func(v interface{}) error { + df := func(v any) error { if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil { return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } for _, sh := range shs { - sh.HandleRPC(stream.Context(), &stats.InPayload{ + sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, Length: len(d), @@ -1346,7 +1360,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: d, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), cm) + binlog.Log(ctx, cm) } } if trInfo != nil { @@ -1354,7 +1368,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
} return nil } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) if appErr != nil { appStatus, ok := status.FromError(appErr) @@ -1379,7 +1393,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Header: h, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) + binlog.Log(ctx, sh) } } st := &binarylog.ServerTrailer{ @@ -1387,7 +1401,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return appErr @@ -1402,7 +1416,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if stream.SendCompress() != sendCompressorName { comp = encoding.GetCompressor(stream.SendCompress()) } - if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err @@ -1429,8 +1443,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), st) + binlog.Log(ctx, sh) + binlog.Log(ctx, st) } } return err @@ -1444,8 +1458,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: reply, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), sm) + binlog.Log(ctx, sh) + binlog.Log(ctx, sm) } } if channelz.IsOn() { @@ -1463,7 +1477,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return t.WriteStatus(stream, statusOK) @@ -1491,7 +1505,7 @@ func chainStreamServerInterceptors(s *Server) { } func chainStreamInterceptors(interceptors []StreamServerInterceptor) StreamServerInterceptor { - return func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { + return func(srv any, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error { return interceptors[0](srv, ss, info, getChainStreamHandler(interceptors, 0, info, handler)) } } @@ -1500,12 +1514,12 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf if curr == len(interceptors)-1 { return finalHandler } - return func(srv interface{}, stream ServerStream) error { + return func(srv any, stream ServerStream) error { return interceptors[curr+1](srv, stream, info, getChainStreamHandler(interceptors, curr+1, info, finalHandler)) } } -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1519,10 +1533,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp IsServerStream: sd.ServerStreams, } for _, sh := range shs { - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, t: t, @@ -1541,7 +1555,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp if trInfo != nil { ss.mu.Lock() if err != nil && err != io.EOF { - 
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } ss.trInfo.tr.Finish() @@ -1558,7 +1572,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp end.Error = toRPCErr(err) } for _, sh := range shs { - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } } @@ -1600,7 +1614,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp logEntry.PeerAddr = peer.Addr } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), logEntry) + binlog.Log(ctx, logEntry) } } @@ -1644,7 +1658,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp trInfo.tr.LazyLog(&trInfo.firstLine, false) } var appErr error - var server interface{} + var server any if info != nil { server = info.serviceImpl } @@ -1678,7 +1692,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } t.WriteStatus(ss.s, appStatus) @@ -1696,53 +1710,87 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return t.WriteStatus(ss.s, statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { + ctx := stream.Context() + ctx = contextWithServer(ctx, s) + var ti *traceInfo + if EnableTracing { + tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) + ctx = trace.NewContext(ctx, tr) + ti = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: t.Peer().Addr, + }, + } + if dl, ok := ctx.Deadline(); ok { + ti.firstLine.deadline = time.Until(dl) + } + } 
+ sm := stream.Method() if sm != "" && sm[0] == '/' { sm = sm[1:] } pos := strings.LastIndex(sm, "/") if pos == -1 { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) + ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } return } service := sm[:pos] method := sm[pos+1:] + md, _ := metadata.FromIncomingContext(ctx) + for _, sh := range s.opts.statsHandlers { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()}) + sh.HandleRPC(ctx, &stats.InHeader{ + FullMethod: stream.Method(), + RemoteAddr: t.Peer().Addr, + LocalAddr: t.Peer().LocalAddr, + Compression: stream.RecvCompress(), + WireLength: stream.HeaderWireLength(), + Header: md, + }) + } + // To have calls in stream callouts work. Will delete once all stats handler + // calls come from the gRPC layer. + stream.SetContext(ctx) + srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(t, stream, srv, md, trInfo) + s.processUnaryRPC(ctx, t, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(t, stream, srv, sd, trInfo) + s.processStreamingRPC(ctx, t, stream, srv, sd, ti) return } } // Unknown service, or known server unknown method. 
if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) return } var errDesc string @@ -1751,19 +1799,19 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } else { errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) } - if trInfo != nil { - trInfo.tr.LazyPrintf("%s", errDesc) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyPrintf("%s", errDesc) + ti.tr.SetError() } if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } } @@ -1818,62 +1866,64 @@ func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream // pending RPCs on the client side will get notified by connection // errors. func (s *Server) Stop() { - s.quit.Fire() + s.stop(false) +} - defer func() { - s.serveWG.Wait() - s.done.Fire() - }() +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. +func (s *Server) GracefulStop() { + s.stop(true) +} + +func (s *Server) stop(graceful bool) { + s.quit.Fire() + defer s.done.Fire() s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) s.mu.Lock() - listeners := s.lis - s.lis = nil - conns := s.conns - s.conns = nil - // interrupt GracefulStop if Stop and GracefulStop are called concurrently. - s.cv.Broadcast() + s.closeListenersLocked() + // Wait for serving threads to be ready to exit. 
Only then can we be sure no + // new conns will be created. s.mu.Unlock() + s.serveWG.Wait() - for lis := range listeners { - lis.Close() - } - for _, cs := range conns { - for st := range cs { - st.Close(errors.New("Server.Stop called")) - } + s.mu.Lock() + defer s.mu.Unlock() + + if graceful { + s.drainAllServerTransportsLocked() + } else { + s.closeServerTransportsLocked() } + if s.opts.numServerWorkers > 0 { s.stopServerWorkers() } - s.mu.Lock() + for len(s.conns) != 0 { + s.cv.Wait() + } + s.conns = nil + if s.events != nil { s.events.Finish() s.events = nil } - s.mu.Unlock() } -// GracefulStop stops the gRPC server gracefully. It stops the server from -// accepting new connections and RPCs and blocks until all the pending RPCs are -// finished. -func (s *Server) GracefulStop() { - s.quit.Fire() - defer s.done.Fire() - - s.channelzRemoveOnce.Do(func() { channelz.RemoveEntry(s.channelzID) }) - s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - return +// s.mu must be held by the caller. +func (s *Server) closeServerTransportsLocked() { + for _, conns := range s.conns { + for st := range conns { + st.Close(errors.New("Server.Stop called")) + } } +} - for lis := range s.lis { - lis.Close() - } - s.lis = nil +// s.mu must be held by the caller. +func (s *Server) drainAllServerTransportsLocked() { if !s.drain { for _, conns := range s.conns { for st := range conns { @@ -1882,22 +1932,14 @@ func (s *Server) GracefulStop() { } s.drain = true } +} - // Wait for serving threads to be ready to exit. Only then can we be sure no - // new conns will be created. - s.mu.Unlock() - s.serveWG.Wait() - s.mu.Lock() - - for len(s.conns) != 0 { - s.cv.Wait() - } - s.conns = nil - if s.events != nil { - s.events.Finish() - s.events = nil +// s.mu must be held by the caller. 
+func (s *Server) closeListenersLocked() { + for lis := range s.lis { + lis.Close() } - s.mu.Unlock() + s.lis = nil } // contentSubtype must be lowercase @@ -1911,11 +1953,50 @@ func (s *Server) getCodec(contentSubtype string) baseCodec { } codec := encoding.GetCodec(contentSubtype) if codec == nil { + logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name) return encoding.GetCodec(proto.Name) } return codec } +type serverKey struct{} + +// serverFromContext gets the Server from the context. +func serverFromContext(ctx context.Context) *Server { + s, _ := ctx.Value(serverKey{}).(*Server) + return s +} + +// contextWithServer sets the Server in the context. +func contextWithServer(ctx context.Context, server *Server) context.Context { + return context.WithValue(ctx, serverKey{}, server) +} + +// isRegisteredMethod returns whether the passed in method is registered as a +// method on the server. /service/method and service/method will match if the +// service and method are registered on the server. +func (s *Server) isRegisteredMethod(serviceMethod string) bool { + if serviceMethod != "" && serviceMethod[0] == '/' { + serviceMethod = serviceMethod[1:] + } + pos := strings.LastIndex(serviceMethod, "/") + if pos == -1 { // Invalid method name syntax. + return false + } + service := serviceMethod[:pos] + method := serviceMethod[pos+1:] + srv, knownService := s.services[service] + if knownService { + if _, ok := srv.methods[method]; ok { + return true + } + if _, ok := srv.streams[method]; ok { + return true + } + } + return false +} + // SetHeader sets the header metadata to be sent from the server to the client. // The context provided must be the context passed to the server's handler. // @@ -2077,12 +2158,12 @@ func validateSendCompressor(name, clientCompressors string) error { // atomicSemaphore implements a blocking, counting semaphore. 
acquire should be // called synchronously; release may be called asynchronously. type atomicSemaphore struct { - n int64 // accessed atomically + n atomic.Int64 wait chan struct{} } func (q *atomicSemaphore) acquire() { - if atomic.AddInt64(&q.n, -1) < 0 { + if q.n.Add(-1) < 0 { // We ran out of quota. Block until a release happens. <-q.wait } @@ -2093,12 +2174,14 @@ func (q *atomicSemaphore) release() { // concurrent calls to acquire, but also note that with synchronous calls to // acquire, as our system does, n will never be less than -1. There are // fairness issues (queuing) to consider if this was to be generalized. - if atomic.AddInt64(&q.n, -1) <= 0 { + if q.n.Add(1) <= 0 { // An acquire was waiting on us. Unblock it. q.wait <- struct{}{} } } func newHandlerQuota(n uint32) *atomicSemaphore { - return &atomicSemaphore{n: int64(n), wait: make(chan struct{}, 1)} + a := &atomicSemaphore{wait: make(chan struct{}, 1)} + a.n.Store(int64(n)) + return a } diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/shared_buffer_pool.go b/.ci/providerlint/vendor/google.golang.org/grpc/shared_buffer_pool.go index c3a5a9ac1f1..48a64cfe8e2 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/shared_buffer_pool.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/shared_buffer_pool.go @@ -109,7 +109,7 @@ const ( type simpleSharedBufferChildPool interface { Get(size int) []byte - Put(interface{}) + Put(any) } type bufferPool struct { @@ -133,7 +133,7 @@ func (p *bufferPool) Get(size int) []byte { func newBytesPool(size int) simpleSharedBufferChildPool { return &bufferPool{ Pool: sync.Pool{ - New: func() interface{} { + New: func() any { bs := make([]byte, size) return &bs }, diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/stats/stats.go b/.ci/providerlint/vendor/google.golang.org/grpc/stats/stats.go index 7a552a9b787..4ab70e2d462 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/stats/stats.go +++ 
b/.ci/providerlint/vendor/google.golang.org/grpc/stats/stats.go @@ -59,12 +59,22 @@ func (s *Begin) IsClient() bool { return s.Client } func (s *Begin) isRPCStats() {} +// PickerUpdated indicates that the LB policy provided a new picker while the +// RPC was waiting for one. +type PickerUpdated struct{} + +// IsClient indicates if the stats information is from client side. Only Client +// Side interfaces with a Picker, thus always returns true. +func (*PickerUpdated) IsClient() bool { return true } + +func (*PickerUpdated) isRPCStats() {} + // InPayload contains the information for an incoming payload. type InPayload struct { // Client is true if this InPayload is from client side. Client bool // Payload is the payload with original type. - Payload interface{} + Payload any // Data is the serialized message payload. Data []byte @@ -134,7 +144,7 @@ type OutPayload struct { // Client is true if this OutPayload is from client side. Client bool // Payload is the payload with original type. - Payload interface{} + Payload any // Data is the serialized message payload. Data []byte // Length is the size of the uncompressed payload data. Does not include any diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/status/status.go b/.ci/providerlint/vendor/google.golang.org/grpc/status/status.go index bcf2e4d81be..a93360efb84 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/status/status.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/status/status.go @@ -50,7 +50,7 @@ func New(c codes.Code, msg string) *Status { } // Newf returns New(c, fmt.Sprintf(format, a...)). -func Newf(c codes.Code, format string, a ...interface{}) *Status { +func Newf(c codes.Code, format string, a ...any) *Status { return New(c, fmt.Sprintf(format, a...)) } @@ -60,7 +60,7 @@ func Error(c codes.Code, msg string) error { } // Errorf returns Error(c, fmt.Sprintf(format, a...)). 
-func Errorf(c codes.Code, format string, a ...interface{}) error { +func Errorf(c codes.Code, format string, a ...any) error { return Error(c, fmt.Sprintf(format, a...)) } @@ -99,25 +99,27 @@ func FromError(err error) (s *Status, ok bool) { } type grpcstatus interface{ GRPCStatus() *Status } if gs, ok := err.(grpcstatus); ok { - if gs.GRPCStatus() == nil { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { // Error has status nil, which maps to codes.OK. There // is no sensible behavior for this, so we turn it into // an error with codes.Unknown and discard the existing // status. return New(codes.Unknown, err.Error()), false } - return gs.GRPCStatus(), true + return grpcStatus, true } var gs grpcstatus if errors.As(err, &gs) { - if gs.GRPCStatus() == nil { + grpcStatus := gs.GRPCStatus() + if grpcStatus == nil { // Error wraps an error that has status nil, which maps // to codes.OK. There is no sensible behavior for this, // so we turn it into an error with codes.Unknown and // discard the existing status. return New(codes.Unknown, err.Error()), false } - p := gs.GRPCStatus().Proto() + p := grpcStatus.Proto() p.Message = err.Error() return status.FromProto(p), true } diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/stream.go b/.ci/providerlint/vendor/google.golang.org/grpc/stream.go index de32a759714..b14b2fbea2e 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/stream.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/stream.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" "google.golang.org/grpc/encoding" + "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/balancerload" "google.golang.org/grpc/internal/binarylog" "google.golang.org/grpc/internal/channelz" @@ -54,7 +55,7 @@ import ( // status package, or be one of the context errors. Otherwise, gRPC will use // codes.Unknown as the status code and err.Error() as the status message of the // RPC. 
-type StreamHandler func(srv interface{}, stream ServerStream) error +type StreamHandler func(srv any, stream ServerStream) error // StreamDesc represents a streaming RPC service's method specification. Used // on the server when registering services and on the client when initiating @@ -79,9 +80,9 @@ type Stream interface { // Deprecated: See ClientStream and ServerStream documentation instead. Context() context.Context // Deprecated: See ClientStream and ServerStream documentation instead. - SendMsg(m interface{}) error + SendMsg(m any) error // Deprecated: See ClientStream and ServerStream documentation instead. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // ClientStream defines the client-side behavior of a streaming RPC. @@ -90,7 +91,9 @@ type Stream interface { // status package. type ClientStream interface { // Header returns the header metadata received from the server if there - // is any. It blocks if the metadata is not ready to read. + // is any. It blocks if the metadata is not ready to read. If the metadata + // is nil and the error is also nil, then the stream was terminated without + // headers, and the status can be discovered by calling RecvMsg. Header() (metadata.MD, error) // Trailer returns the trailer metadata from the server, if there is any. // It must only be called after stream.CloseAndRecv has returned, or @@ -126,7 +129,7 @@ type ClientStream interface { // // It is not safe to modify the message after calling SendMsg. Tracing // libraries and stats handlers may use the message lazily. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the stream completes successfully. 
On // any other error, the stream is aborted and the error contains the RPC @@ -135,7 +138,7 @@ type ClientStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // NewStream creates a new Stream for the client side. This is typically @@ -155,11 +158,6 @@ type ClientStream interface { // If none of the above happen, a goroutine and a context will be leaked, and grpc // will not call the optionally-configured stats handler with a stats.End message. func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { - if err := cc.idlenessMgr.onCallBegin(); err != nil { - return nil, err - } - defer cc.idlenessMgr.onCallEnd() - // allow interceptor to see all applicable call options, which means those // configured as defaults from dial option as well as per-call options opts = combine(cc.dopts.callOptions, opts) @@ -176,6 +174,16 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth } func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + // Start tracking the RPC for idleness purposes. This is where a stream is + // created for both streaming and unary RPCs, and hence is a good place to + // track active RPC count. + if err := cc.idlenessMgr.OnCallBegin(); err != nil { + return nil, err + } + // Add a calloption, to decrement the active call count, that gets executed + // when the RPC completes. + opts = append([]CallOption{OnFinish(func(error) { cc.idlenessMgr.OnCallEnd() })}, opts...) 
+ if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { // validate md if err := imetadata.Validate(md); err != nil { @@ -433,7 +441,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error) ctx = trace.NewContext(ctx, trInfo.tr) } - if cs.cc.parsedTarget.URL.Scheme == "xds" { + if cs.cc.parsedTarget.URL.Scheme == internal.GRPCResolverSchemeExtraMetadata { // Add extra metadata (metadata that will be added by transport) to context // so the balancer can see them. ctx = grpcutil.WithExtraMetadata(ctx, metadata.Pairs( @@ -788,23 +796,24 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func()) func (cs *clientStream) Header() (metadata.MD, error) { var m metadata.MD - noHeader := false err := cs.withRetry(func(a *csAttempt) error { var err error m, err = a.s.Header() - if err == transport.ErrNoHeaders { - noHeader = true - return nil - } return toRPCErr(err) }, cs.commitAttemptLocked) + if m == nil && err == nil { + // The stream ended with success. Finish the clientStream. + err = io.EOF + } + if err != nil { cs.finish(err) - return nil, err + // Do not return the error. The user should get it by calling Recv(). + return nil, nil } - if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && !noHeader { + if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged && m != nil { // Only log if binary log is on and header has not been logged, and // there is actually headers to log. 
logEntry := &binarylog.ServerHeader{ @@ -820,6 +829,7 @@ func (cs *clientStream) Header() (metadata.MD, error) { binlog.Log(cs.ctx, logEntry) } } + return m, nil } @@ -860,7 +870,7 @@ func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error cs.buffer = append(cs.buffer, op) } -func (cs *clientStream) SendMsg(m interface{}) (err error) { +func (cs *clientStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -904,7 +914,7 @@ func (cs *clientStream) SendMsg(m interface{}) (err error) { return err } -func (cs *clientStream) RecvMsg(m interface{}) error { +func (cs *clientStream) RecvMsg(m any) error { if len(cs.binlogs) != 0 && !cs.serverHeaderBinlogged { // Call Header() to binary log header if it's not already logged. cs.Header() @@ -928,24 +938,6 @@ func (cs *clientStream) RecvMsg(m interface{}) error { if err != nil || !cs.desc.ServerStreams { // err != nil or non-server-streaming indicates end of stream. cs.finish(err) - - if len(cs.binlogs) != 0 { - // finish will not log Trailer. Log Trailer here. - logEntry := &binarylog.ServerTrailer{ - OnClientSide: true, - Trailer: cs.Trailer(), - Err: err, - } - if logEntry.Err == io.EOF { - logEntry.Err = nil - } - if peer, ok := peer.FromContext(cs.Context()); ok { - logEntry.PeerAddr = peer.Addr - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, logEntry) - } - } } return err } @@ -1001,18 +993,30 @@ func (cs *clientStream) finish(err error) { } } } + cs.mu.Unlock() - // For binary logging. only log cancel in finish (could be caused by RPC ctx - // canceled or ClientConn closed). Trailer will be logged in RecvMsg. - // - // Only one of cancel or trailer needs to be logged. In the cases where - // users don't call RecvMsg, users must have already canceled the RPC. 
- if len(cs.binlogs) != 0 && status.Code(err) == codes.Canceled { - c := &binarylog.Cancel{ - OnClientSide: true, - } - for _, binlog := range cs.binlogs { - binlog.Log(cs.ctx, c) + // Only one of cancel or trailer needs to be logged. + if len(cs.binlogs) != 0 { + switch err { + case errContextCanceled, errContextDeadline, ErrClientConnClosing: + c := &binarylog.Cancel{ + OnClientSide: true, + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, c) + } + default: + logEntry := &binarylog.ServerTrailer{ + OnClientSide: true, + Trailer: cs.Trailer(), + Err: err, + } + if peer, ok := peer.FromContext(cs.Context()); ok { + logEntry.PeerAddr = peer.Addr + } + for _, binlog := range cs.binlogs { + binlog.Log(cs.ctx, logEntry) + } } } if err == nil { @@ -1028,7 +1032,7 @@ func (cs *clientStream) finish(err error) { cs.cancel() } -func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { +func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error { cs := a.cs if a.trInfo != nil { a.mu.Lock() @@ -1055,7 +1059,7 @@ func (a *csAttempt) sendMsg(m interface{}, hdr, payld, data []byte) error { return nil } -func (a *csAttempt) recvMsg(m interface{}, payInfo *payloadInfo) (err error) { +func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) { cs := a.cs if len(a.statsHandlers) != 0 && payInfo == nil { payInfo = &payloadInfo{} @@ -1348,7 +1352,7 @@ func (as *addrConnStream) Context() context.Context { return as.s.Context() } -func (as *addrConnStream) SendMsg(m interface{}) (err error) { +func (as *addrConnStream) SendMsg(m any) (err error) { defer func() { if err != nil && err != io.EOF { // Call finish on the client stream for errors generated by this SendMsg @@ -1393,7 +1397,7 @@ func (as *addrConnStream) SendMsg(m interface{}) (err error) { return nil } -func (as *addrConnStream) RecvMsg(m interface{}) (err error) { +func (as *addrConnStream) RecvMsg(m any) (err error) { defer func() { if err != nil || !as.desc.ServerStreams { // 
err != nil or non-server-streaming indicates end of stream. @@ -1512,7 +1516,7 @@ type ServerStream interface { // // It is not safe to modify the message after calling SendMsg. Tracing // libraries and stats handlers may use the message lazily. - SendMsg(m interface{}) error + SendMsg(m any) error // RecvMsg blocks until it receives a message into m or the stream is // done. It returns io.EOF when the client has performed a CloseSend. On // any non-EOF error, the stream is aborted and the error contains the @@ -1521,7 +1525,7 @@ type ServerStream interface { // It is safe to have a goroutine calling SendMsg and another goroutine // calling RecvMsg on the same stream at the same time, but it is not // safe to call RecvMsg on the same stream in different goroutines. - RecvMsg(m interface{}) error + RecvMsg(m any) error } // serverStream implements a server side Stream. @@ -1602,7 +1606,7 @@ func (ss *serverStream) SetTrailer(md metadata.MD) { ss.s.SetTrailer(md) } -func (ss *serverStream) SendMsg(m interface{}) (err error) { +func (ss *serverStream) SendMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1610,7 +1614,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) } else { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1677,7 +1681,7 @@ func (ss *serverStream) SendMsg(m interface{}) (err error) { return nil } -func (ss *serverStream) RecvMsg(m interface{}) (err error) { +func (ss *serverStream) RecvMsg(m any) (err error) { defer func() { if ss.trInfo != nil { ss.mu.Lock() @@ -1685,7 +1689,7 @@ func (ss *serverStream) RecvMsg(m interface{}) (err error) { if err == nil { ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) } else if err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + 
ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) ss.trInfo.tr.SetError() } } @@ -1757,7 +1761,7 @@ func MethodFromServerStream(stream ServerStream) (string, bool) { // prepareMsg returns the hdr, payload and data // using the compressors passed or using the // passed preparedmsg -func prepareMsg(m interface{}, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { +func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) { if preparedMsg, ok := m.(*PreparedMsg); ok { return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil } diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/tap/tap.go b/.ci/providerlint/vendor/google.golang.org/grpc/tap/tap.go index bfa5dfa40e4..07f01257688 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/tap/tap.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/tap/tap.go @@ -27,6 +27,8 @@ package tap import ( "context" + + "google.golang.org/grpc/metadata" ) // Info defines the relevant information needed by the handles. @@ -34,6 +36,10 @@ type Info struct { // FullMethodName is the string of grpc method (in the format of // /package.service/method). FullMethodName string + + // Header contains the header metadata received. + Header metadata.MD + // TODO: More to be added. } diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/trace.go b/.ci/providerlint/vendor/google.golang.org/grpc/trace.go index 07a2d26b3e7..9ded79321ba 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/trace.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/trace.go @@ -97,8 +97,8 @@ func truncate(x string, l int) string { // payload represents an RPC request or response payload. type payload struct { - sent bool // whether this is an outgoing payload - msg interface{} // e.g. a proto.Message + sent bool // whether this is an outgoing payload + msg any // e.g. 
a proto.Message // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? } @@ -111,7 +111,7 @@ func (p payload) String() string { type fmtStringer struct { format string - a []interface{} + a []any } func (f *fmtStringer) String() string { diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/version.go b/.ci/providerlint/vendor/google.golang.org/grpc/version.go index 12a5a9d00c8..a04793aeb51 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/version.go +++ b/.ci/providerlint/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.57.1" +const Version = "1.60.0" diff --git a/.ci/providerlint/vendor/google.golang.org/grpc/vet.sh b/.ci/providerlint/vendor/google.golang.org/grpc/vet.sh index a8e4732b3d2..896dc38f506 100644 --- a/.ci/providerlint/vendor/google.golang.org/grpc/vet.sh +++ b/.ci/providerlint/vendor/google.golang.org/grpc/vet.sh @@ -35,7 +35,6 @@ if [[ "$1" = "-install" ]]; then # Install the pinned versions as defined in module tools. pushd ./test/tools go install \ - golang.org/x/lint/golint \ golang.org/x/tools/cmd/goimports \ honnef.co/go/tools/cmd/staticcheck \ github.com/client9/misspell/cmd/misspell @@ -77,12 +76,19 @@ fi not grep 'func Test[^(]' *_test.go not grep 'func Test[^(]' test/*.go +# - Check for typos in test function names +git grep 'func (s) ' -- "*_test.go" | not grep -v 'func (s) Test' +git grep 'func [A-Z]' -- "*_test.go" | not grep -v 'func Test\|Benchmark\|Example' + # - Do not import x/net/context. not git grep -l 'x/net/context' -- "*.go" # - Do not import math/rand for real library code. Use internal/grpcrand for # thread safety. -git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^stress\|grpcrand\|^benchmark\|wrr_test' +git grep -l '"math/rand"' -- "*.go" 2>&1 | not grep -v '^examples\|^interop/stress\|grpcrand\|^benchmark\|wrr_test' + +# - Do not use "interface{}"; use "any" instead. 
+git grep -l 'interface{}' -- "*.go" 2>&1 | not grep -v '\.pb\.go\|protoc-gen-go-grpc' # - Do not call grpclog directly. Use grpclog.Component instead. git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpclog.F' --or -e 'grpclog.V' -- "*.go" | not grep -v '^grpclog/component.go\|^internal/grpctest/tlogger_test.go' @@ -90,13 +96,15 @@ git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpc # - Ensure all ptypes proto packages are renamed when importing. not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" +# - Ensure all usages of grpc_testing package are renamed when importing. +not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" + # - Ensure all xds proto imports are renamed to *pb or *grpc. git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' misspell -error . -# - gofmt, goimports, golint (with exceptions for generated code), go vet, -# go mod tidy. +# - gofmt, goimports, go vet, go mod tidy. # Perform these checks on each module inside gRPC. for MOD_FILE in $(find . -name 'go.mod'); do MOD_DIR=$(dirname ${MOD_FILE}) @@ -104,105 +112,81 @@ for MOD_FILE in $(find . -name 'go.mod'); do go vet -all ./... | fail_on_output gofmt -s -d -l . 2>&1 | fail_on_output goimports -l . 2>&1 | not grep -vE "\.pb\.go" - golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:" - go mod tidy -compat=1.17 + go mod tidy -compat=1.19 git status --porcelain 2>&1 | fail_on_output || \ (git status; git --no-pager diff; exit 1) popd done # - Collection of static analysis checks -# -# TODO(dfawley): don't use deprecated functions in examples or first-party -# plugins. -# TODO(dfawley): enable ST1019 (duplicate imports) but allow for protobufs. SC_OUT="$(mktemp)" -staticcheck -go 1.19 -checks 'inherit,-ST1015,-ST1019,-SA1019' ./... 
> "${SC_OUT}" || true -# Error if anything other than deprecation warnings are printed. -not grep -v "is deprecated:.*SA1019" "${SC_OUT}" -# Only ignore the following deprecated types/fields/functions. -not grep -Fv '.CredsBundle -.HeaderMap -.Metadata is deprecated: use Attributes -.NewAddress -.NewServiceConfig -.Type is deprecated: use Attributes -BuildVersion is deprecated -balancer.ErrTransientFailure -balancer.Picker -extDesc.Filename is deprecated -github.com/golang/protobuf/jsonpb is deprecated -grpc.CallCustomCodec -grpc.Code -grpc.Compressor -grpc.CustomCodec -grpc.Decompressor -grpc.MaxMsgSize -grpc.MethodConfig -grpc.NewGZIPCompressor -grpc.NewGZIPDecompressor -grpc.RPCCompressor -grpc.RPCDecompressor -grpc.ServiceConfig -grpc.WithCompressor -grpc.WithDecompressor -grpc.WithDialer -grpc.WithMaxMsgSize -grpc.WithServiceConfig -grpc.WithTimeout -http.CloseNotifier -info.SecurityVersion -proto is deprecated -proto.InternalMessageInfo is deprecated -proto.EnumName is deprecated -proto.ErrInternalBadWireType is deprecated -proto.FileDescriptor is deprecated -proto.Marshaler is deprecated -proto.MessageType is deprecated -proto.RegisterEnum is deprecated -proto.RegisterFile is deprecated -proto.RegisterType is deprecated -proto.RegisterExtension is deprecated -proto.RegisteredExtension is deprecated -proto.RegisteredExtensions is deprecated -proto.RegisterMapType is deprecated -proto.Unmarshaler is deprecated -resolver.Backend -resolver.GRPCLB +staticcheck -go 1.19 -checks 'all' ./... > "${SC_OUT}" || true + +# Error for anything other than checks that need exclusions. +grep -v "(ST1000)" "${SC_OUT}" | grep -v "(SA1019)" | grep -v "(ST1003)" | not grep -v "(ST1019)\|\(other import of\)" + +# Exclude underscore checks for generated code. +grep "(ST1003)" "${SC_OUT}" | not grep -v '\(.pb.go:\)\|\(code_string_test.go:\)' + +# Error for duplicate imports not including grpc protos. 
+grep "(ST1019)\|\(other import of\)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused +channelz/grpc_channelz_v1" +go-control-plane/envoy +grpclb/grpc_lb_v1" +health/grpc_health_v1" +interop/grpc_testing" +orca/v3" +proto/grpc_gcp" +proto/grpc_lookup_v1" +reflection/grpc_reflection_v1" +reflection/grpc_reflection_v1alpha" +XXXXX PleaseIgnoreUnused' + +# Error for any package comments not in generated code. +grep "(ST1000)" "${SC_OUT}" | not grep -v "\.pb\.go:" + +# Only ignore the following deprecated types/fields/functions and exclude +# generated code. +grep "(SA1019)" "${SC_OUT}" | not grep -Fv 'XXXXX PleaseIgnoreUnused +XXXXX Protobuf related deprecation errors: +"github.com/golang/protobuf +.pb.go: +: ptypes. +proto.RegisterType +XXXXX gRPC internal usage deprecation errors: +"google.golang.org/grpc +: grpc. +: v1alpha. +: v1alphareflectionpb. +BalancerAttributes is deprecated: +CredsBundle is deprecated: +Metadata is deprecated: use Attributes instead. +NewSubConn is deprecated: +OverrideServerName is deprecated: +RemoveSubConn is deprecated: +SecurityVersion is deprecated: Target is deprecated: Use the Target field in the BuildOptions instead. -xxx_messageInfo_ -' "${SC_OUT}" - -# - special golint on package comments. -lint_package_comment_per_package() { - # Number of files in this go package. - fileCount=$(go list -f '{{len .GoFiles}}' $1) - if [ ${fileCount} -eq 0 ]; then - return 0 - fi - # Number of package errors generated by golint. - lintPackageCommentErrorsCount=$(golint --min_confidence 0 $1 | grep -c "should have a package comment") - # golint complains about every file that's missing the package comment. If the - # number of files for this package is greater than the number of errors, there's - # at least one file with package comment, good. Otherwise, fail. 
- if [ ${fileCount} -le ${lintPackageCommentErrorsCount} ]; then - echo "Package $1 (with ${fileCount} files) is missing package comment" - return 1 - fi -} -lint_package_comment() { - set +ex - - count=0 - for i in $(go list ./...); do - lint_package_comment_per_package "$i" - ((count += $?)) - done - - set -ex - return $count -} -lint_package_comment +UpdateAddresses is deprecated: +UpdateSubConnState is deprecated: +balancer.ErrTransientFailure is deprecated: +grpc/reflection/v1alpha/reflection.proto +XXXXX xDS deprecated fields we support +.ExactMatch +.PrefixMatch +.SafeRegexMatch +.SuffixMatch +GetContainsMatch +GetExactMatch +GetMatchSubjectAltNames +GetPrefixMatch +GetSafeRegexMatch +GetSuffixMatch +GetTlsCertificateCertificateProviderInstance +GetValidationContextCertificateProviderInstance +XXXXX TODO: Remove the below deprecation usages: +CloseNotifier +Roots.Subjects +XXXXX PleaseIgnoreUnused' echo SUCCESS diff --git a/.ci/providerlint/vendor/modules.txt b/.ci/providerlint/vendor/modules.txt index adf9ee493d6..b632043e690 100644 --- a/.ci/providerlint/vendor/modules.txt +++ b/.ci/providerlint/vendor/modules.txt @@ -239,10 +239,11 @@ github.com/hashicorp/go-hclog # github.com/hashicorp/go-multierror v1.1.1 ## explicit; go 1.13 github.com/hashicorp/go-multierror -# github.com/hashicorp/go-plugin v1.5.1 +# github.com/hashicorp/go-plugin v1.6.0 ## explicit; go 1.17 github.com/hashicorp/go-plugin github.com/hashicorp/go-plugin/internal/cmdrunner +github.com/hashicorp/go-plugin/internal/grpcmux github.com/hashicorp/go-plugin/internal/plugin github.com/hashicorp/go-plugin/runner # github.com/hashicorp/go-uuid v1.0.3 @@ -251,7 +252,7 @@ github.com/hashicorp/go-uuid # github.com/hashicorp/go-version v1.6.0 ## explicit github.com/hashicorp/go-version -# github.com/hashicorp/hc-install v0.6.1 +# github.com/hashicorp/hc-install v0.6.2 ## explicit; go 1.18 github.com/hashicorp/hc-install github.com/hashicorp/hc-install/checkpoint @@ -279,10 +280,10 @@ 
github.com/hashicorp/logutils ## explicit; go 1.18 github.com/hashicorp/terraform-exec/internal/version github.com/hashicorp/terraform-exec/tfexec -# github.com/hashicorp/terraform-json v0.17.1 +# github.com/hashicorp/terraform-json v0.18.0 ## explicit; go 1.18 github.com/hashicorp/terraform-json -# github.com/hashicorp/terraform-plugin-go v0.19.0 +# github.com/hashicorp/terraform-plugin-go v0.20.0 ## explicit; go 1.20 github.com/hashicorp/terraform-plugin-go/internal/logging github.com/hashicorp/terraform-plugin-go/tfprotov5 @@ -307,7 +308,7 @@ github.com/hashicorp/terraform-plugin-log/internal/hclogutils github.com/hashicorp/terraform-plugin-log/internal/logging github.com/hashicorp/terraform-plugin-log/tflog github.com/hashicorp/terraform-plugin-log/tfsdklog -# github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0 +# github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0 ## explicit; go 1.20 github.com/hashicorp/terraform-plugin-sdk/v2/diag github.com/hashicorp/terraform-plugin-sdk/v2/helper/id @@ -329,14 +330,14 @@ github.com/hashicorp/terraform-plugin-sdk/v2/internal/tfdiags github.com/hashicorp/terraform-plugin-sdk/v2/meta github.com/hashicorp/terraform-plugin-sdk/v2/plugin github.com/hashicorp/terraform-plugin-sdk/v2/terraform -# github.com/hashicorp/terraform-registry-address v0.2.2 +# github.com/hashicorp/terraform-registry-address v0.2.3 ## explicit; go 1.19 github.com/hashicorp/terraform-registry-address # github.com/hashicorp/terraform-svchost v0.1.1 ## explicit; go 1.19 github.com/hashicorp/terraform-svchost -# github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d -## explicit +# github.com/hashicorp/yamux v0.1.1 +## explicit; go 1.15 github.com/hashicorp/yamux # github.com/mattn/go-colorable v0.1.12 ## explicit; go 1.13 @@ -366,8 +367,8 @@ github.com/oklog/run ## explicit github.com/vmihailenco/msgpack github.com/vmihailenco/msgpack/codes -# github.com/vmihailenco/msgpack/v5 v5.3.5 -## explicit; go 1.11 +# github.com/vmihailenco/msgpack/v5 v5.4.1 
+## explicit; go 1.19 github.com/vmihailenco/msgpack/v5 github.com/vmihailenco/msgpack/v5/msgpcode # github.com/vmihailenco/tagparser/v2 v2.0.0 @@ -385,29 +386,28 @@ github.com/zclconf/go-cty/cty/function/stdlib github.com/zclconf/go-cty/cty/gocty github.com/zclconf/go-cty/cty/json github.com/zclconf/go-cty/cty/set -# golang.org/x/crypto v0.15.0 +# golang.org/x/crypto v0.16.0 ## explicit; go 1.18 golang.org/x/crypto/argon2 golang.org/x/crypto/blake2b golang.org/x/crypto/cast5 golang.org/x/crypto/hkdf golang.org/x/crypto/sha3 -# golang.org/x/mod v0.13.0 +# golang.org/x/mod v0.14.0 ## explicit; go 1.18 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.17.0 -## explicit; go 1.17 -golang.org/x/net/context +# golang.org/x/net v0.18.0 +## explicit; go 1.18 golang.org/x/net/http/httpguts golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna golang.org/x/net/internal/timeseries golang.org/x/net/trace -# golang.org/x/sys v0.14.0 +# golang.org/x/sys v0.15.0 ## explicit; go 1.18 golang.org/x/sys/cpu golang.org/x/sys/execabs @@ -451,7 +451,7 @@ golang.org/x/tools/internal/tokeninternal golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal golang.org/x/tools/txtar -# google.golang.org/appengine v1.6.7 +# google.golang.org/appengine v1.6.8 ## explicit; go 1.11 google.golang.org/appengine google.golang.org/appengine/datastore @@ -464,11 +464,11 @@ google.golang.org/appengine/internal/datastore google.golang.org/appengine/internal/log google.golang.org/appengine/internal/modules google.golang.org/appengine/internal/remote_api -# google.golang.org/genproto/googleapis/rpc v0.0.0-20230525234030-28d5490b6b19 +# google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.57.1 -## explicit; go 1.17 +# google.golang.org/grpc v1.60.0 +## explicit; 
go 1.19 google.golang.org/grpc google.golang.org/grpc/attributes google.golang.org/grpc/backoff @@ -500,10 +500,12 @@ google.golang.org/grpc/internal/grpclog google.golang.org/grpc/internal/grpcrand google.golang.org/grpc/internal/grpcsync google.golang.org/grpc/internal/grpcutil +google.golang.org/grpc/internal/idle google.golang.org/grpc/internal/metadata google.golang.org/grpc/internal/pretty google.golang.org/grpc/internal/resolver google.golang.org/grpc/internal/resolver/dns +google.golang.org/grpc/internal/resolver/dns/internal google.golang.org/grpc/internal/resolver/passthrough google.golang.org/grpc/internal/resolver/unix google.golang.org/grpc/internal/serviceconfig @@ -518,6 +520,7 @@ google.golang.org/grpc/reflection google.golang.org/grpc/reflection/grpc_reflection_v1 google.golang.org/grpc/reflection/grpc_reflection_v1alpha google.golang.org/grpc/resolver +google.golang.org/grpc/resolver/dns google.golang.org/grpc/serviceconfig google.golang.org/grpc/stats google.golang.org/grpc/status From 5da0ce1c1d23e2bd23bd4b1b0e294655f632c1a3 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 12:00:07 -0500 Subject: [PATCH 304/438] Tweak CHANGELOG entry. --- .changelog/30721.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.changelog/30721.txt b/.changelog/30721.txt index 464706991f9..e551608ad23 100644 --- a/.changelog/30721.txt +++ b/.changelog/30721.txt @@ -1,3 +1,3 @@ ```release-note:enhancement -resource/aws_dms_replication_task: Allow in-place migration between DMS instances +resource/aws_dms_replication_task: Remove [ForceNew](https://developer.hashicorp.com/terraform/plugin/sdkv2/schemas/schema-behaviors#forcenew) from `replication_instance_arn`, allowing in-place migration between DMS instances ``` \ No newline at end of file From ac5377bd5126601e715428ccae71f99385bb69a8 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 12:01:48 -0500 Subject: [PATCH 305/438] r/aws_dms_replication_task: Tidy up Create. 
--- internal/service/dms/replication_task.go | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/internal/service/dms/replication_task.go b/internal/service/dms/replication_task.go index b9814130f91..03aa4ea2741 100644 --- a/internal/service/dms/replication_task.go +++ b/internal/service/dms/replication_task.go @@ -117,12 +117,11 @@ func resourceReplicationTaskCreate(ctx context.Context, d *schema.ResourceData, var diags diag.Diagnostics conn := meta.(*conns.AWSClient).DMSConn(ctx) - taskId := d.Get("replication_task_id").(string) - - request := &dms.CreateReplicationTaskInput{ + taskID := d.Get("replication_task_id").(string) + input := &dms.CreateReplicationTaskInput{ MigrationType: aws.String(d.Get("migration_type").(string)), ReplicationInstanceArn: aws.String(d.Get("replication_instance_arn").(string)), - ReplicationTaskIdentifier: aws.String(taskId), + ReplicationTaskIdentifier: aws.String(taskID), SourceEndpointArn: aws.String(d.Get("source_endpoint_arn").(string)), TableMappings: aws.String(d.Get("table_mappings").(string)), Tags: getTagsIn(ctx), @@ -130,33 +129,32 @@ func resourceReplicationTaskCreate(ctx context.Context, d *schema.ResourceData, } if v, ok := d.GetOk("cdc_start_position"); ok { - request.CdcStartPosition = aws.String(v.(string)) + input.CdcStartPosition = aws.String(v.(string)) } if v, ok := d.GetOk("cdc_start_time"); ok { v := v.(string) if t, err := time.Parse(time.RFC3339, v); err != nil { - request.CdcStartTime = aws.Time(time.Unix(flex.StringValueToInt64Value(v), 0)) + input.CdcStartTime = aws.Time(time.Unix(flex.StringValueToInt64Value(v), 0)) } else { - request.CdcStartTime = aws.Time(t) + input.CdcStartTime = aws.Time(t) } } if v, ok := d.GetOk("replication_task_settings"); ok { - request.ReplicationTaskSettings = aws.String(v.(string)) + input.ReplicationTaskSettings = aws.String(v.(string)) } - log.Println("[DEBUG] DMS create replication task:", request) + _, err := 
conn.CreateReplicationTaskWithContext(ctx, input) - _, err := conn.CreateReplicationTaskWithContext(ctx, request) if err != nil { - return sdkdiag.AppendErrorf(diags, "creating DMS Replication Task (%s): %s", taskId, err) + return sdkdiag.AppendErrorf(diags, "creating DMS Replication Task (%s): %s", taskID, err) } - d.SetId(taskId) + d.SetId(taskID) if err := waitReplicationTaskReady(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { - return sdkdiag.AppendErrorf(diags, "waiting for DMS Replication Task (%s) to become available: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for DMS Replication Task (%s) create: %s", d.Id(), err) } if d.Get("start_replication_task").(bool) { From abe4bc1c3f92f3c6f09d1407efd2df87da0aa761 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 12:12:55 -0500 Subject: [PATCH 306/438] r/aws_dms_replication_task: Tidy up Read. --- internal/service/dms/endpoint.go | 17 +- internal/service/dms/replication_task.go | 243 ++++++------------ .../dms/replication_task_data_source.go | 25 +- 3 files changed, 106 insertions(+), 179 deletions(-) diff --git a/internal/service/dms/endpoint.go b/internal/service/dms/endpoint.go index c150396f45c..3beb2834d26 100644 --- a/internal/service/dms/endpoint.go +++ b/internal/service/dms/endpoint.go @@ -1681,7 +1681,7 @@ func resourceEndpointSetState(d *schema.ResourceData, endpoint *dms.Endpoint) er } func steadyEndpointReplicationTasks(ctx context.Context, conn *dms.DatabaseMigrationService, arn string) error { - tasks, err := FindReplicationTasksByEndpointARN(ctx, conn, arn) + tasks, err := findReplicationTasksByEndpointARN(ctx, conn, arn) if err != nil { return err } @@ -1706,7 +1706,7 @@ func stopEndpointReplicationTasks(ctx context.Context, conn *dms.DatabaseMigrati return nil, err } - tasks, err := FindReplicationTasksByEndpointARN(ctx, conn, arn) + tasks, err := findReplicationTasksByEndpointARN(ctx, conn, arn) if err != nil { return nil, err } @@ -1774,6 
+1774,19 @@ func startEndpointReplicationTasks(ctx context.Context, conn *dms.DatabaseMigrat return nil } +func findReplicationTasksByEndpointARN(ctx context.Context, conn *dms.DatabaseMigrationService, arn string) ([]*dms.ReplicationTask, error) { + input := &dms.DescribeReplicationTasksInput{ + Filters: []*dms.Filter{ + { + Name: aws.String("endpoint-arn"), + Values: aws.StringSlice([]string{arn}), + }, + }, + } + + return findReplicationTasks(ctx, conn, input) +} + func flattenOpenSearchSettings(settings *dms.ElasticsearchSettings) []map[string]interface{} { if settings == nil { return []map[string]interface{}{} diff --git a/internal/service/dms/replication_task.go b/internal/service/dms/replication_task.go index 03aa4ea2741..85659eb880a 100644 --- a/internal/service/dms/replication_task.go +++ b/internal/service/dms/replication_task.go @@ -5,7 +5,6 @@ package dms import ( "context" - "encoding/json" "fmt" "log" "time" @@ -182,27 +181,17 @@ func resourceReplicationTaskRead(ctx context.Context, d *schema.ResourceData, me return sdkdiag.AppendErrorf(diags, "reading DMS Replication Task (%s): %s", d.Id(), err) } - if task == nil { - return sdkdiag.AppendErrorf(diags, "reading DMS Replication Task (%s): empty output", d.Id()) - } - d.Set("cdc_start_position", task.CdcStartPosition) d.Set("migration_type", task.MigrationType) d.Set("replication_instance_arn", task.ReplicationInstanceArn) d.Set("replication_task_arn", task.ReplicationTaskArn) d.Set("replication_task_id", task.ReplicationTaskIdentifier) + d.Set("replication_task_settings", task.ReplicationTaskSettings) d.Set("source_endpoint_arn", task.SourceEndpointArn) d.Set("status", task.Status) d.Set("table_mappings", task.TableMappings) d.Set("target_endpoint_arn", task.TargetEndpointArn) - settings, err := replicationTaskRemoveReadOnlySettings(aws.StringValue(task.ReplicationTaskSettings)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading DMS Replication Task (%s): %s", d.Id(), err) - } - - 
d.Set("replication_task_settings", settings) - return diags } @@ -352,176 +341,40 @@ func resourceReplicationTaskDelete(ctx context.Context, d *schema.ResourceData, return diags } -func replicationTaskRemoveReadOnlySettings(settings string) (*string, error) { - var settingsData map[string]interface{} - if err := json.Unmarshal([]byte(settings), &settingsData); err != nil { - return nil, err - } - - controlTablesSettings, ok := settingsData["ControlTablesSettings"].(map[string]interface{}) - if ok { - delete(controlTablesSettings, "historyTimeslotInMinutes") - } - - logging, ok := settingsData["Logging"].(map[string]interface{}) - if ok { - delete(logging, "EnableLogContext") - delete(logging, "CloudWatchLogGroup") - delete(logging, "CloudWatchLogStream") - } - - cleanedSettings, err := json.Marshal(settingsData) - if err != nil { - return nil, err - } - - cleanedSettingsString := string(cleanedSettings) - return &cleanedSettingsString, nil -} - -func startReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationService, id string) error { - log.Printf("[DEBUG] Starting DMS Replication Task: (%s)", id) - - task, err := FindReplicationTaskByID(ctx, conn, id) - if err != nil { - return fmt.Errorf("reading DMS Replication Task (%s): %w", id, err) - } - - if task == nil { - return fmt.Errorf("reading DMS Replication Task (%s): empty output", id) - } - - startReplicationTaskType := dms.StartReplicationTaskTypeValueStartReplication - if aws.StringValue(task.Status) != replicationTaskStatusReady { - startReplicationTaskType = dms.StartReplicationTaskTypeValueResumeProcessing - } - - _, err = conn.StartReplicationTaskWithContext(ctx, &dms.StartReplicationTaskInput{ - ReplicationTaskArn: task.ReplicationTaskArn, - StartReplicationTaskType: aws.String(startReplicationTaskType), - }) - - if err != nil { - return fmt.Errorf("starting DMS Replication Task (%s): %w", id, err) - } - - err = waitReplicationTaskRunning(ctx, conn, id) - if err != nil { - return 
fmt.Errorf("waiting for DMS Replication Task (%s) start: %w", id, err) - } - - return nil -} - -func stopReplicationTask(ctx context.Context, id string, conn *dms.DatabaseMigrationService) error { - log.Printf("[DEBUG] Stopping DMS Replication Task: %s", id) - - task, err := FindReplicationTaskByID(ctx, conn, id) - if err != nil { - return fmt.Errorf("reading DMS Replication Task (%s): %w", id, err) - } - - if task == nil { - return fmt.Errorf("reading DMS Replication Task (%s): empty output", id) - } - - _, err = conn.StopReplicationTaskWithContext(ctx, &dms.StopReplicationTaskInput{ - ReplicationTaskArn: task.ReplicationTaskArn, - }) - - if tfawserr.ErrMessageContains(err, dms.ErrCodeInvalidResourceStateFault, "is currently not running") { - return nil - } - - if err != nil { - return fmt.Errorf("stopping DMS Replication Task (%s): %w", id, err) - } - - err = waitReplicationTaskStopped(ctx, conn, id) - if err != nil { - return fmt.Errorf("waiting for DMS Replication Task (%s) stop: %w", id, err) - } - - return nil -} - func FindReplicationTaskByID(ctx context.Context, conn *dms.DatabaseMigrationService, id string) (*dms.ReplicationTask, error) { input := &dms.DescribeReplicationTasksInput{ Filters: []*dms.Filter{ { Name: aws.String("replication-task-id"), - Values: []*string{aws.String(id)}, // Must use d.Id() to work with import. 
+ Values: aws.StringSlice([]string{id}), }, }, } - return FindReplicationTask(ctx, conn, input) -} - -func FindReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationTasksInput) (*dms.ReplicationTask, error) { - var results []*dms.ReplicationTask - err := conn.DescribeReplicationTasksPagesWithContext(ctx, input, func(page *dms.DescribeReplicationTasksOutput, lastPage bool) bool { - if page == nil { - return !lastPage - } - - for _, task := range page.ReplicationTasks { - if task == nil { - continue - } - results = append(results, task) - } - - return !lastPage - }) + return findReplicationTask(ctx, conn, input) +} - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } +func findReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationTasksInput) (*dms.ReplicationTask, error) { + output, err := findReplicationTasks(ctx, conn, input) if err != nil { return nil, err } - if len(results) == 0 { - return nil, tfresource.NewEmptyResultError(input) - } - - if count := len(results); count > 1 { - return nil, tfresource.NewTooManyResultsError(count, input) - } - - return results[0], nil + return tfresource.AssertSinglePtrResult(output) } -func FindReplicationTasksByEndpointARN(ctx context.Context, conn *dms.DatabaseMigrationService, arn string) ([]*dms.ReplicationTask, error) { - input := &dms.DescribeReplicationTasksInput{ - Filters: []*dms.Filter{ - { - Name: aws.String("endpoint-arn"), - Values: []*string{aws.String(arn)}, - }, - }, - } - - var results []*dms.ReplicationTask +func findReplicationTasks(ctx context.Context, conn *dms.DatabaseMigrationService, input *dms.DescribeReplicationTasksInput) ([]*dms.ReplicationTask, error) { + var output []*dms.ReplicationTask err := conn.DescribeReplicationTasksPagesWithContext(ctx, input, func(page *dms.DescribeReplicationTasksOutput, 
lastPage bool) bool { if page == nil { return !lastPage } - for _, task := range page.ReplicationTasks { - if task == nil { - continue - } - - switch aws.StringValue(task.Status) { - case replicationTaskStatusRunning, replicationTaskStatusStarting: - results = append(results, task) + for _, v := range page.ReplicationTasks { + if v != nil { + output = append(output, v) } } @@ -539,7 +392,11 @@ func FindReplicationTasksByEndpointARN(ctx context.Context, conn *dms.DatabaseMi return nil, err } - return results, nil + if err != nil { + return nil, err + } + + return output, nil } func statusReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationService, id string) retry.StateRefreshFunc { @@ -675,3 +532,69 @@ func waitReplicationTaskSteady(ctx context.Context, conn *dms.DatabaseMigrationS return err } + +func startReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationService, id string) error { + log.Printf("[DEBUG] Starting DMS Replication Task: (%s)", id) + + task, err := FindReplicationTaskByID(ctx, conn, id) + if err != nil { + return fmt.Errorf("reading DMS Replication Task (%s): %w", id, err) + } + + if task == nil { + return fmt.Errorf("reading DMS Replication Task (%s): empty output", id) + } + + startReplicationTaskType := dms.StartReplicationTaskTypeValueStartReplication + if aws.StringValue(task.Status) != replicationTaskStatusReady { + startReplicationTaskType = dms.StartReplicationTaskTypeValueResumeProcessing + } + + _, err = conn.StartReplicationTaskWithContext(ctx, &dms.StartReplicationTaskInput{ + ReplicationTaskArn: task.ReplicationTaskArn, + StartReplicationTaskType: aws.String(startReplicationTaskType), + }) + + if err != nil { + return fmt.Errorf("starting DMS Replication Task (%s): %w", id, err) + } + + err = waitReplicationTaskRunning(ctx, conn, id) + if err != nil { + return fmt.Errorf("waiting for DMS Replication Task (%s) start: %w", id, err) + } + + return nil +} + +func stopReplicationTask(ctx context.Context, id string, 
conn *dms.DatabaseMigrationService) error { + log.Printf("[DEBUG] Stopping DMS Replication Task: %s", id) + + task, err := FindReplicationTaskByID(ctx, conn, id) + if err != nil { + return fmt.Errorf("reading DMS Replication Task (%s): %w", id, err) + } + + if task == nil { + return fmt.Errorf("reading DMS Replication Task (%s): empty output", id) + } + + _, err = conn.StopReplicationTaskWithContext(ctx, &dms.StopReplicationTaskInput{ + ReplicationTaskArn: task.ReplicationTaskArn, + }) + + if tfawserr.ErrMessageContains(err, dms.ErrCodeInvalidResourceStateFault, "is currently not running") { + return nil + } + + if err != nil { + return fmt.Errorf("stopping DMS Replication Task (%s): %w", id, err) + } + + err = waitReplicationTaskStopped(ctx, conn, id) + if err != nil { + return fmt.Errorf("waiting for DMS Replication Task (%s) stop: %w", id, err) + } + + return nil +} diff --git a/internal/service/dms/replication_task_data_source.go b/internal/service/dms/replication_task_data_source.go index ccf8a4400c3..2eca0674151 100644 --- a/internal/service/dms/replication_task_data_source.go +++ b/internal/service/dms/replication_task_data_source.go @@ -10,9 +10,8 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/names" ) // @SDKDataSource("aws_dms_replication_task") @@ -65,19 +64,15 @@ func DataSourceReplicationTask() *schema.Resource { Type: schema.TypeString, Computed: true, }, + "tags": tftags.TagsSchemaComputed(), "target_endpoint_arn": { Type: schema.TypeString, Computed: true, }, - "tags": tftags.TagsSchemaComputed(), }, } } -const ( - DSNameReplicationTask = "Replication Task Data Source" 
-) - func dataSourceReplicationTaskRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics @@ -88,8 +83,9 @@ func dataSourceReplicationTaskRead(ctx context.Context, d *schema.ResourceData, taskID := d.Get("replication_task_id").(string) task, err := FindReplicationTaskByID(ctx, conn, taskID) + if err != nil { - return create.AppendDiagError(diags, names.DMS, create.ErrActionReading, DSNameReplicationTask, d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading DMS Replication Task (%s): %s", taskID, err) } d.SetId(aws.StringValue(task.ReplicationTaskIdentifier)) @@ -98,28 +94,23 @@ func dataSourceReplicationTaskRead(ctx context.Context, d *schema.ResourceData, d.Set("replication_instance_arn", task.ReplicationInstanceArn) d.Set("replication_task_arn", task.ReplicationTaskArn) d.Set("replication_task_id", task.ReplicationTaskIdentifier) + d.Set("replication_task_settings", task.ReplicationTaskSettings) d.Set("source_endpoint_arn", task.SourceEndpointArn) d.Set("status", task.Status) d.Set("table_mappings", task.TableMappings) d.Set("target_endpoint_arn", task.TargetEndpointArn) - settings, err := replicationTaskRemoveReadOnlySettings(aws.StringValue(task.ReplicationTaskSettings)) - if err != nil { - return create.AppendDiagError(diags, names.DMS, create.ErrActionReading, DSNameReplicationTask, d.Id(), err) - } - - d.Set("replication_task_settings", settings) - tags, err := listTags(ctx, conn, aws.StringValue(task.ReplicationTaskArn)) + if err != nil { - return create.AppendDiagError(diags, names.DMS, create.ErrActionReading, DSNameReplicationTask, d.Id(), err) + return sdkdiag.AppendErrorf(diags, "listing DMS Replication Task (%s) tags: %s", d.Id(), err) } tags = tags.IgnoreAWS().IgnoreConfig(ignoreTagsConfig) //lintignore:AWSR002 if err := d.Set("tags", tags.RemoveDefaultConfig(defaultTagsConfig).Map()); err != nil { - return create.AppendDiagError(diags, names.DMS, create.ErrActionSetting, 
DSNameReplicationTask, d.Id(), err) + return sdkdiag.AppendErrorf(diags, "setting tags: %s", err) } return diags From c99231dc343b48e91e11dbe2c0df9eb856e2ac70 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 12:15:04 -0500 Subject: [PATCH 307/438] r/aws_dms_replication_task: Prevent erroneous diffs on `replication_task_settings`. --- .changelog/34356.txt | 4 ++++ internal/service/dms/replication_task.go | 9 +++++---- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/.changelog/34356.txt b/.changelog/34356.txt index bf5c6d0a2a4..5ecbdb78edd 100644 --- a/.changelog/34356.txt +++ b/.changelog/34356.txt @@ -1,3 +1,7 @@ ```release-note:bug resource/aws_dms_replication_config: Prevent erroneous diffs on `replication_settings` ``` + +```release-note:bug +resource/aws_dms_replication_task: Prevent erroneous diffs on `replication_task_settings` +``` \ No newline at end of file diff --git a/internal/service/dms/replication_task.go b/internal/service/dms/replication_task.go index 85659eb880a..26230a1c44d 100644 --- a/internal/service/dms/replication_task.go +++ b/internal/service/dms/replication_task.go @@ -72,10 +72,11 @@ func ResourceReplicationTask() *schema.Resource { ValidateFunc: validReplicationTaskID, }, "replication_task_settings": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringIsJSON, - DiffSuppressFunc: verify.SuppressEquivalentJSONDiffs, + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringIsJSON, + DiffSuppressFunc: suppressEquivalentTaskSettings, + DiffSuppressOnRefresh: true, }, "source_endpoint_arn": { Type: schema.TypeString, From e75ba7fc5691b1d40377835af45f2dc6b776e828 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 12:18:54 -0500 Subject: [PATCH 308/438] dms: Tidy up 'startReplicationTask'. 
--- internal/service/dms/replication_task.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/internal/service/dms/replication_task.go b/internal/service/dms/replication_task.go index 26230a1c44d..39ed1deecc5 100644 --- a/internal/service/dms/replication_task.go +++ b/internal/service/dms/replication_task.go @@ -535,33 +535,33 @@ func waitReplicationTaskSteady(ctx context.Context, conn *dms.DatabaseMigrationS } func startReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationService, id string) error { - log.Printf("[DEBUG] Starting DMS Replication Task: (%s)", id) - task, err := FindReplicationTaskByID(ctx, conn, id) + if err != nil { return fmt.Errorf("reading DMS Replication Task (%s): %w", id, err) } - if task == nil { - return fmt.Errorf("reading DMS Replication Task (%s): empty output", id) + taskStatus := aws.StringValue(task.Status) + if taskStatus == replicationTaskStatusRunning { + return nil } startReplicationTaskType := dms.StartReplicationTaskTypeValueStartReplication - if aws.StringValue(task.Status) != replicationTaskStatusReady { + if taskStatus != replicationTaskStatusReady { startReplicationTaskType = dms.StartReplicationTaskTypeValueResumeProcessing } - - _, err = conn.StartReplicationTaskWithContext(ctx, &dms.StartReplicationTaskInput{ + input := &dms.StartReplicationTaskInput{ ReplicationTaskArn: task.ReplicationTaskArn, StartReplicationTaskType: aws.String(startReplicationTaskType), - }) + } + + _, err = conn.StartReplicationTaskWithContext(ctx, input) if err != nil { return fmt.Errorf("starting DMS Replication Task (%s): %w", id, err) } - err = waitReplicationTaskRunning(ctx, conn, id) - if err != nil { + if err := waitReplicationTaskRunning(ctx, conn, id); err != nil { return fmt.Errorf("waiting for DMS Replication Task (%s) start: %w", id, err) } From 940841aae369031491d6d265f3e9cee94e542467 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 12:23:42 -0500 Subject: [PATCH 
309/438] dms: Tidy up 'stopReplicationTask'. --- internal/service/dms/endpoint.go | 2 +- internal/service/dms/replication_config.go | 2 -- internal/service/dms/replication_task.go | 31 +++++++++++++--------- 3 files changed, 19 insertions(+), 16 deletions(-) diff --git a/internal/service/dms/endpoint.go b/internal/service/dms/endpoint.go index 3beb2834d26..d73e76677a8 100644 --- a/internal/service/dms/endpoint.go +++ b/internal/service/dms/endpoint.go @@ -1716,7 +1716,7 @@ func stopEndpointReplicationTasks(ctx context.Context, conn *dms.DatabaseMigrati rtID := aws.StringValue(task.ReplicationTaskIdentifier) switch aws.StringValue(task.Status) { case replicationTaskStatusRunning: - err := stopReplicationTask(ctx, rtID, conn) + err := stopReplicationTask(ctx, conn, rtID) if err != nil { return stoppedTasks, err diff --git a/internal/service/dms/replication_config.go b/internal/service/dms/replication_config.go index adda23214e9..7a43bb07b78 100644 --- a/internal/service/dms/replication_config.go +++ b/internal/service/dms/replication_config.go @@ -557,7 +557,6 @@ func startReplication(ctx context.Context, conn *dms.DatabaseMigrationService, a } replicationStatus := aws.StringValue(replication.Status) - if replicationStatus == replicationStatusRunning { return nil } @@ -596,7 +595,6 @@ func stopReplication(ctx context.Context, conn *dms.DatabaseMigrationService, ar } replicationStatus := aws.StringValue(replication.Status) - if replicationStatus == replicationStatusStopped || replicationStatus == replicationStatusCreated || replicationStatus == replicationStatusFailed { return nil } diff --git a/internal/service/dms/replication_task.go b/internal/service/dms/replication_task.go index 39ed1deecc5..f75bc4a5598 100644 --- a/internal/service/dms/replication_task.go +++ b/internal/service/dms/replication_task.go @@ -233,7 +233,7 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, status := d.Get("status").(string) if status == 
replicationTaskStatusRunning { log.Println("[DEBUG] stopping DMS replication task:", input) - if err := stopReplicationTask(ctx, d.Id(), conn); err != nil { + if err := stopReplicationTask(ctx, conn, d.Id()); err != nil { return sdkdiag.AppendFromErr(diags, err) } } @@ -264,7 +264,7 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, status := d.Get("status").(string) if status == replicationTaskStatusRunning { log.Println("[DEBUG] stopping DMS replication task:", input) - if err := stopReplicationTask(ctx, d.Id(), conn); err != nil { + if err := stopReplicationTask(ctx, conn, d.Id()); err != nil { return sdkdiag.AppendFromErr(diags, err) } } @@ -296,7 +296,7 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, } } else { if status == replicationTaskStatusRunning { - if err := stopReplicationTask(ctx, d.Id(), conn); err != nil { + if err := stopReplicationTask(ctx, conn, d.Id()); err != nil { return sdkdiag.AppendFromErr(diags, err) } } @@ -311,7 +311,7 @@ func resourceReplicationTaskDelete(ctx context.Context, d *schema.ResourceData, conn := meta.(*conns.AWSClient).DMSConn(ctx) if status := d.Get("status").(string); status == replicationTaskStatusRunning { - if err := stopReplicationTask(ctx, d.Id(), conn); err != nil { + if err := stopReplicationTask(ctx, conn, d.Id()); err != nil { return sdkdiag.AppendFromErr(diags, err) } } @@ -568,21 +568,27 @@ func startReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationServic return nil } -func stopReplicationTask(ctx context.Context, id string, conn *dms.DatabaseMigrationService) error { - log.Printf("[DEBUG] Stopping DMS Replication Task: %s", id) - +func stopReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationService, id string) error { task, err := FindReplicationTaskByID(ctx, conn, id) + + if tfresource.NotFound(err) { + return nil + } + if err != nil { return fmt.Errorf("reading DMS Replication Task (%s): %w", id, err) } - if task == 
nil { - return fmt.Errorf("reading DMS Replication Task (%s): empty output", id) + taskStatus := aws.StringValue(task.Status) + if taskStatus != replicationTaskStatusRunning { + return nil } - _, err = conn.StopReplicationTaskWithContext(ctx, &dms.StopReplicationTaskInput{ + input := &dms.StopReplicationTaskInput{ ReplicationTaskArn: task.ReplicationTaskArn, - }) + } + + _, err = conn.StopReplicationTaskWithContext(ctx, input) if tfawserr.ErrMessageContains(err, dms.ErrCodeInvalidResourceStateFault, "is currently not running") { return nil @@ -592,8 +598,7 @@ func stopReplicationTask(ctx context.Context, id string, conn *dms.DatabaseMigra return fmt.Errorf("stopping DMS Replication Task (%s): %w", id, err) } - err = waitReplicationTaskStopped(ctx, conn, id) - if err != nil { + if err := waitReplicationTaskStopped(ctx, conn, id); err != nil { return fmt.Errorf("waiting for DMS Replication Task (%s) stop: %w", id, err) } From 956aff98b565e9168e5d0aedac7248a61a71d00a Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 13:12:06 -0500 Subject: [PATCH 310/438] r/aws_dms_replication_task: Tidy up Delete. 
--- internal/service/dms/replication_task.go | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/internal/service/dms/replication_task.go b/internal/service/dms/replication_task.go index f75bc4a5598..367a111a1be 100644 --- a/internal/service/dms/replication_task.go +++ b/internal/service/dms/replication_task.go @@ -310,19 +310,14 @@ func resourceReplicationTaskDelete(ctx context.Context, d *schema.ResourceData, var diags diag.Diagnostics conn := meta.(*conns.AWSClient).DMSConn(ctx) - if status := d.Get("status").(string); status == replicationTaskStatusRunning { - if err := stopReplicationTask(ctx, conn, d.Id()); err != nil { - return sdkdiag.AppendFromErr(diags, err) - } + if err := stopReplicationTask(ctx, conn, d.Id()); err != nil { + return sdkdiag.AppendFromErr(diags, err) } - input := &dms.DeleteReplicationTaskInput{ + log.Printf("[DEBUG] Deleting DMS Replication Task: %s", d.Id()) + _, err := conn.DeleteReplicationTaskWithContext(ctx, &dms.DeleteReplicationTaskInput{ ReplicationTaskArn: aws.String(d.Get("replication_task_arn").(string)), - } - - log.Printf("[DEBUG] DMS delete replication task: %#v", input) - - _, err := conn.DeleteReplicationTaskWithContext(ctx, input) + }) if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { return diags @@ -333,10 +328,7 @@ func resourceReplicationTaskDelete(ctx context.Context, d *schema.ResourceData, } if err := waitReplicationTaskDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { - if tfawserr.ErrCodeEquals(err, dms.ErrCodeResourceNotFoundFault) { - return diags - } - return sdkdiag.AppendErrorf(diags, "waiting for DMS Replication Task (%s) to be deleted: %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for DMS Replication Task (%s) delete: %s", d.Id(), err) } return diags From e3b06898f4722a0808a63a8804a931c966596e60 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 13:17:00 -0500 Subject: [PATCH 311/438] 
r/aws_dms_replication_task: Tidy up Update. --- internal/service/dms/replication_task.go | 59 +++++++++--------------- 1 file changed, 22 insertions(+), 37 deletions(-) diff --git a/internal/service/dms/replication_task.go b/internal/service/dms/replication_task.go index 367a111a1be..0970afe2d64 100644 --- a/internal/service/dms/replication_task.go +++ b/internal/service/dms/replication_task.go @@ -200,10 +200,14 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, var diags diag.Diagnostics conn := meta.(*conns.AWSClient).DMSConn(ctx) - if d.HasChangesExcept("tags", "tags_all", "start_replication_task", "replication_instance_arn") { + if d.HasChangesExcept("tags", "tags_all", "replication_instance_arn", "start_replication_task") { + if err := stopReplicationTask(ctx, conn, d.Id()); err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + input := &dms.ModifyReplicationTaskInput{ - ReplicationTaskArn: aws.String(d.Get("replication_task_arn").(string)), MigrationType: aws.String(d.Get("migration_type").(string)), + ReplicationTaskArn: aws.String(d.Get("replication_task_arn").(string)), TableMappings: aws.String(d.Get("table_mappings").(string)), } @@ -223,25 +227,15 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, } if d.HasChange("replication_task_settings") { - if v, ok := d.Get("replication_task_settings").(string); ok && v != "" { - input.ReplicationTaskSettings = aws.String(v) - } else { - input.ReplicationTaskSettings = nil - } - } - - status := d.Get("status").(string) - if status == replicationTaskStatusRunning { - log.Println("[DEBUG] stopping DMS replication task:", input) - if err := stopReplicationTask(ctx, conn, d.Id()); err != nil { - return sdkdiag.AppendFromErr(diags, err) + if v, ok := d.GetOk("replication_task_settings"); ok { + input.ReplicationTaskSettings = aws.String(v.(string)) } } - log.Println("[DEBUG] updating DMS replication task:", input) _, err := 
conn.ModifyReplicationTaskWithContext(ctx, input) + if err != nil { - return sdkdiag.AppendErrorf(diags, "updating DMS Replication Task (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "modifying DMS Replication Task (%s): %s", d.Id(), err) } if err := waitReplicationTaskModified(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { @@ -249,28 +243,24 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, } if d.Get("start_replication_task").(bool) { - err := startReplicationTask(ctx, conn, d.Id()) - if err != nil { + if err := startReplicationTask(ctx, conn, d.Id()); err != nil { return sdkdiag.AppendFromErr(diags, err) } } } if d.HasChange("replication_instance_arn") { + if err := stopReplicationTask(ctx, conn, d.Id()); err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + input := &dms.MoveReplicationTaskInput{ ReplicationTaskArn: aws.String(d.Get("replication_task_arn").(string)), TargetReplicationInstanceArn: aws.String(d.Get("replication_instance_arn").(string)), } - status := d.Get("status").(string) - if status == replicationTaskStatusRunning { - log.Println("[DEBUG] stopping DMS replication task:", input) - if err := stopReplicationTask(ctx, conn, d.Id()); err != nil { - return sdkdiag.AppendFromErr(diags, err) - } - } - log.Println("[DEBUG] moving DMS replication task:", input) _, err := conn.MoveReplicationTaskWithContext(ctx, input) + if err != nil { return sdkdiag.AppendErrorf(diags, "moving DMS Replication Task (%s): %s", d.Id(), err) } @@ -287,19 +277,14 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, } if d.HasChanges("start_replication_task") { - status := d.Get("status").(string) + var f func(context.Context, *dms.DatabaseMigrationService, string) error if d.Get("start_replication_task").(bool) { - if status != replicationTaskStatusRunning { - if err := startReplicationTask(ctx, conn, d.Id()); err != nil { - return sdkdiag.AppendFromErr(diags, err) - } - 
} + f = startReplicationTask } else { - if status == replicationTaskStatusRunning { - if err := stopReplicationTask(ctx, conn, d.Id()); err != nil { - return sdkdiag.AppendFromErr(diags, err) - } - } + f = stopReplicationTask + } + if err := f(ctx, conn, d.Id()); err != nil { + return sdkdiag.AppendFromErr(diags, err) } } From 6020ccffb6cd3f630207ec5c73ec8498dcb3d4fe Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 13:24:07 -0500 Subject: [PATCH 312/438] r/aws_dms_replication_task: Tidy up waiters. --- internal/service/dms/endpoint.go | 2 +- internal/service/dms/replication_config.go | 2 +- internal/service/dms/replication_task.go | 127 ++++++++++++++------- 3 files changed, 89 insertions(+), 42 deletions(-) diff --git a/internal/service/dms/endpoint.go b/internal/service/dms/endpoint.go index d73e76677a8..ca8b596a833 100644 --- a/internal/service/dms/endpoint.go +++ b/internal/service/dms/endpoint.go @@ -1692,7 +1692,7 @@ func steadyEndpointReplicationTasks(ctx context.Context, conn *dms.DatabaseMigra case replicationTaskStatusRunning, replicationTaskStatusFailed, replicationTaskStatusReady, replicationTaskStatusStopped: continue case replicationTaskStatusCreating, replicationTaskStatusDeleting, replicationTaskStatusModifying, replicationTaskStatusStopping, replicationTaskStatusStarting: - if err := waitReplicationTaskSteady(ctx, conn, rtID); err != nil { + if _, err := waitReplicationTaskSteady(ctx, conn, rtID); err != nil { return err } } diff --git a/internal/service/dms/replication_config.go b/internal/service/dms/replication_config.go index 7a43bb07b78..cd04271011b 100644 --- a/internal/service/dms/replication_config.go +++ b/internal/service/dms/replication_config.go @@ -291,7 +291,7 @@ func resourceReplicationConfigUpdate(ctx context.Context, d *schema.ResourceData _, err := conn.ModifyReplicationConfigWithContext(ctx, input) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating DMS Replication Config (%s): %s", d.Id(), err) + 
return sdkdiag.AppendErrorf(diags, "modifying DMS Replication Config (%s): %s", d.Id(), err) } if d.Get("start_replication").(bool) { diff --git a/internal/service/dms/replication_task.go b/internal/service/dms/replication_task.go index 0970afe2d64..ad33727891d 100644 --- a/internal/service/dms/replication_task.go +++ b/internal/service/dms/replication_task.go @@ -5,6 +5,7 @@ package dms import ( "context" + "errors" "fmt" "log" "time" @@ -153,7 +154,7 @@ func resourceReplicationTaskCreate(ctx context.Context, d *schema.ResourceData, d.SetId(taskID) - if err := waitReplicationTaskReady(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { + if _, err := waitReplicationTaskReady(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for DMS Replication Task (%s) create: %s", d.Id(), err) } @@ -238,7 +239,7 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "modifying DMS Replication Task (%s): %s", d.Id(), err) } - if err := waitReplicationTaskModified(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := waitReplicationTaskModified(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for DMS Replication Task (%s) update: %s", d.Id(), err) } @@ -265,7 +266,7 @@ func resourceReplicationTaskUpdate(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "moving DMS Replication Task (%s): %s", d.Id(), err) } - if err := waitReplicationTaskMoved(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { + if _, err := waitReplicationTaskMoved(ctx, conn, d.Id(), d.Timeout(schema.TimeoutUpdate)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for DMS Replication Task (%s) update: %s", d.Id(), err) } @@ -312,7 +313,7 @@ func resourceReplicationTaskDelete(ctx context.Context, d *schema.ResourceData, return 
sdkdiag.AppendErrorf(diags, "deleting DMS Replication Task (%s): %s", d.Id(), err) } - if err := waitReplicationTaskDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { + if _, err := waitReplicationTaskDeleted(ctx, conn, d.Id(), d.Timeout(schema.TimeoutDelete)); err != nil { return sdkdiag.AppendErrorf(diags, "waiting for DMS Replication Task (%s) delete: %s", d.Id(), err) } @@ -393,11 +394,20 @@ func statusReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationServi } } -const ( - replicationTaskRunningTimeout = 5 * time.Minute -) +func setLastReplicationTaskError(err error, replication *dms.ReplicationTask) { + var errs []error + + if v := aws.StringValue(replication.LastFailureMessage); v != "" { + errs = append(errs, errors.New(v)) + } + if v := aws.StringValue(replication.StopReason); v != "" { + errs = append(errs, errors.New(v)) + } + + tfresource.SetLastError(err, errors.Join(errs...)) +} -func waitReplicationTaskDeleted(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) error { +func waitReplicationTaskDeleted(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) (*dms.ReplicationTask, error) { stateConf := &retry.StateChangeConf{ Pending: []string{replicationTaskStatusDeleting}, Target: []string{}, @@ -407,13 +417,17 @@ func waitReplicationTaskDeleted(ctx context.Context, conn *dms.DatabaseMigration Delay: 30 * time.Second, // Wait 30 secs before starting } - // Wait, catching any errors - _, err := stateConf.WaitForStateContext(ctx) + outputRaw, err := stateConf.WaitForStateContext(ctx) - return err + if output, ok := outputRaw.(*dms.ReplicationTask); ok { + setLastReplicationTaskError(err, output) + return output, err + } + + return nil, err } -func waitReplicationTaskModified(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) error { +func waitReplicationTaskModified(ctx context.Context, conn 
*dms.DatabaseMigrationService, id string, timeout time.Duration) (*dms.ReplicationTask, error) { stateConf := &retry.StateChangeConf{ Pending: []string{replicationTaskStatusModifying}, Target: []string{replicationTaskStatusReady, replicationTaskStatusStopped, replicationTaskStatusFailed}, @@ -423,13 +437,17 @@ func waitReplicationTaskModified(ctx context.Context, conn *dms.DatabaseMigratio Delay: 30 * time.Second, // Wait 30 secs before starting } - // Wait, catching any errors - _, err := stateConf.WaitForStateContext(ctx) + outputRaw, err := stateConf.WaitForStateContext(ctx) - return err + if output, ok := outputRaw.(*dms.ReplicationTask); ok { + setLastReplicationTaskError(err, output) + return output, err + } + + return nil, err } -func waitReplicationTaskMoved(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) error { +func waitReplicationTaskMoved(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) (*dms.ReplicationTask, error) { stateConf := &retry.StateChangeConf{ Pending: []string{replicationTaskStatusModifying, replicationTaskStatusMoving}, Target: []string{replicationTaskStatusReady, replicationTaskStatusStopped, replicationTaskStatusFailed}, @@ -439,13 +457,17 @@ func waitReplicationTaskMoved(ctx context.Context, conn *dms.DatabaseMigrationSe Delay: 30 * time.Second, // Wait 30 secs before starting } - // Wait, catching any errors - _, err := stateConf.WaitForStateContext(ctx) + outputRaw, err := stateConf.WaitForStateContext(ctx) - return err + if output, ok := outputRaw.(*dms.ReplicationTask); ok { + setLastReplicationTaskError(err, output) + return output, err + } + + return nil, err } -func waitReplicationTaskReady(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) error { +func waitReplicationTaskReady(ctx context.Context, conn *dms.DatabaseMigrationService, id string, timeout time.Duration) (*dms.ReplicationTask, error) { 
stateConf := &retry.StateChangeConf{ Pending: []string{replicationTaskStatusCreating}, Target: []string{replicationTaskStatusReady}, @@ -455,60 +477,85 @@ func waitReplicationTaskReady(ctx context.Context, conn *dms.DatabaseMigrationSe Delay: 30 * time.Second, // Wait 30 secs before starting } - // Wait, catching any errors - _, err := stateConf.WaitForStateContext(ctx) + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*dms.ReplicationTask); ok { + setLastReplicationTaskError(err, output) + return output, err + } - return err + return nil, err } -func waitReplicationTaskRunning(ctx context.Context, conn *dms.DatabaseMigrationService, id string) error { +func waitReplicationTaskRunning(ctx context.Context, conn *dms.DatabaseMigrationService, id string) (*dms.ReplicationTask, error) { + const ( + timeout = 5 * time.Minute + ) stateConf := &retry.StateChangeConf{ Pending: []string{replicationTaskStatusStarting}, Target: []string{replicationTaskStatusRunning}, Refresh: statusReplicationTask(ctx, conn, id), - Timeout: replicationTaskRunningTimeout, + Timeout: timeout, MinTimeout: 10 * time.Second, Delay: 30 * time.Second, // Wait 30 secs before starting } - // Wait, catching any errors - _, err := stateConf.WaitForStateContext(ctx) + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*dms.ReplicationTask); ok { + setLastReplicationTaskError(err, output) + return output, err + } - return err + return nil, err } -func waitReplicationTaskStopped(ctx context.Context, conn *dms.DatabaseMigrationService, id string) error { +func waitReplicationTaskStopped(ctx context.Context, conn *dms.DatabaseMigrationService, id string) (*dms.ReplicationTask, error) { + const ( + timeout = 5 * time.Minute + ) stateConf := &retry.StateChangeConf{ Pending: []string{replicationTaskStatusStopping, replicationTaskStatusRunning}, Target: []string{replicationTaskStatusStopped}, Refresh: statusReplicationTask(ctx, conn, id), - 
Timeout: replicationTaskRunningTimeout, + Timeout: timeout, MinTimeout: 10 * time.Second, Delay: 60 * time.Second, // Wait 60 secs before starting ContinuousTargetOccurence: 2, } - // Wait, catching any errors - _, err := stateConf.WaitForStateContext(ctx) + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*dms.ReplicationTask); ok { + setLastReplicationTaskError(err, output) + return output, err + } - return err + return nil, err } -func waitReplicationTaskSteady(ctx context.Context, conn *dms.DatabaseMigrationService, id string) error { +func waitReplicationTaskSteady(ctx context.Context, conn *dms.DatabaseMigrationService, id string) (*dms.ReplicationTask, error) { + const ( + timeout = 5 * time.Minute + ) stateConf := &retry.StateChangeConf{ Pending: []string{replicationTaskStatusCreating, replicationTaskStatusDeleting, replicationTaskStatusModifying, replicationTaskStatusStopping, replicationTaskStatusStarting}, Target: []string{replicationTaskStatusFailed, replicationTaskStatusReady, replicationTaskStatusStopped, replicationTaskStatusRunning}, Refresh: statusReplicationTask(ctx, conn, id), - Timeout: replicationTaskRunningTimeout, + Timeout: timeout, MinTimeout: 10 * time.Second, Delay: 60 * time.Second, // Wait 60 secs before starting ContinuousTargetOccurence: 2, } - // Wait, catching any errors - _, err := stateConf.WaitForStateContext(ctx) + outputRaw, err := stateConf.WaitForStateContext(ctx) + + if output, ok := outputRaw.(*dms.ReplicationTask); ok { + setLastReplicationTaskError(err, output) + return output, err + } - return err + return nil, err } func startReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationService, id string) error { @@ -538,7 +585,7 @@ func startReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationServic return fmt.Errorf("starting DMS Replication Task (%s): %w", id, err) } - if err := waitReplicationTaskRunning(ctx, conn, id); err != nil { + if _, err := 
waitReplicationTaskRunning(ctx, conn, id); err != nil { return fmt.Errorf("waiting for DMS Replication Task (%s) start: %w", id, err) } @@ -575,7 +622,7 @@ func stopReplicationTask(ctx context.Context, conn *dms.DatabaseMigrationService return fmt.Errorf("stopping DMS Replication Task (%s): %w", id, err) } - if err := waitReplicationTaskStopped(ctx, conn, id); err != nil { + if _, err := waitReplicationTaskStopped(ctx, conn, id); err != nil { return fmt.Errorf("waiting for DMS Replication Task (%s) stop: %w", id, err) } From 22541453f4e5f9bd9a4089d7cd2001dbfde33f43 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 15:42:27 -0500 Subject: [PATCH 313/438] r/aws_dms_replication_task: Tidy up acceptance tests. --- internal/service/dms/replication_task.go | 14 +- .../dms/replication_task_data_source_test.go | 4 +- internal/service/dms/replication_task_test.go | 347 ++---------------- 3 files changed, 41 insertions(+), 324 deletions(-) diff --git a/internal/service/dms/replication_task.go b/internal/service/dms/replication_task.go index ad33727891d..ac30f9124f1 100644 --- a/internal/service/dms/replication_task.go +++ b/internal/service/dms/replication_task.go @@ -414,7 +414,7 @@ func waitReplicationTaskDeleted(ctx context.Context, conn *dms.DatabaseMigration Refresh: statusReplicationTask(ctx, conn, id), Timeout: timeout, MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting + Delay: 30 * time.Second, } outputRaw, err := stateConf.WaitForStateContext(ctx) @@ -434,7 +434,7 @@ func waitReplicationTaskModified(ctx context.Context, conn *dms.DatabaseMigratio Refresh: statusReplicationTask(ctx, conn, id), Timeout: timeout, MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting + Delay: 30 * time.Second, } outputRaw, err := stateConf.WaitForStateContext(ctx) @@ -454,7 +454,7 @@ func waitReplicationTaskMoved(ctx context.Context, conn *dms.DatabaseMigrationSe Refresh: statusReplicationTask(ctx, 
conn, id), Timeout: timeout, MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting + Delay: 30 * time.Second, } outputRaw, err := stateConf.WaitForStateContext(ctx) @@ -474,7 +474,7 @@ func waitReplicationTaskReady(ctx context.Context, conn *dms.DatabaseMigrationSe Refresh: statusReplicationTask(ctx, conn, id), Timeout: timeout, MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting + Delay: 30 * time.Second, } outputRaw, err := stateConf.WaitForStateContext(ctx) @@ -497,7 +497,7 @@ func waitReplicationTaskRunning(ctx context.Context, conn *dms.DatabaseMigration Refresh: statusReplicationTask(ctx, conn, id), Timeout: timeout, MinTimeout: 10 * time.Second, - Delay: 30 * time.Second, // Wait 30 secs before starting + Delay: 30 * time.Second, } outputRaw, err := stateConf.WaitForStateContext(ctx) @@ -520,7 +520,7 @@ func waitReplicationTaskStopped(ctx context.Context, conn *dms.DatabaseMigration Refresh: statusReplicationTask(ctx, conn, id), Timeout: timeout, MinTimeout: 10 * time.Second, - Delay: 60 * time.Second, // Wait 60 secs before starting + Delay: 60 * time.Second, ContinuousTargetOccurence: 2, } @@ -544,7 +544,7 @@ func waitReplicationTaskSteady(ctx context.Context, conn *dms.DatabaseMigrationS Refresh: statusReplicationTask(ctx, conn, id), Timeout: timeout, MinTimeout: 10 * time.Second, - Delay: 60 * time.Second, // Wait 60 secs before starting + Delay: 60 * time.Second, ContinuousTargetOccurence: 2, } diff --git a/internal/service/dms/replication_task_data_source_test.go b/internal/service/dms/replication_task_data_source_test.go index 0fcde0c5746..b3e487e2821 100644 --- a/internal/service/dms/replication_task_data_source_test.go +++ b/internal/service/dms/replication_task_data_source_test.go @@ -37,9 +37,7 @@ func TestAccDMSReplicationTaskDataSource_basic(t *testing.T) { } func testAccReplicationTaskDataSourceConfig_basic(rName string) string { - return acctest.ConfigCompose( - 
replicationTaskConfigBase(rName), - fmt.Sprintf(` + return acctest.ConfigCompose(testAccReplicationTaskConfig_base(rName), fmt.Sprintf(` resource "aws_dms_replication_task" "test" { migration_type = "full-load" replication_instance_arn = aws_dms_replication_instance.test.replication_instance_arn diff --git a/internal/service/dms/replication_task_test.go b/internal/service/dms/replication_task_test.go index fc896b1b470..d40ca48f347 100644 --- a/internal/service/dms/replication_task_test.go +++ b/internal/service/dms/replication_task_test.go @@ -391,10 +391,6 @@ func testAccCheckReplicationTaskExists(ctx context.Context, n string) resource.T return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No ID is set") - } - conn := acctest.Provider.Meta().(*conns.AWSClient).DMSConn(ctx) _, err := tfdms.FindReplicationTaskByID(ctx, conn, rs.Primary.ID) @@ -422,49 +418,18 @@ func testAccCheckReplicationTaskDestroy(ctx context.Context) resource.TestCheckF return err } - return fmt.Errorf("DMS replication task (%s) still exists", rs.Primary.ID) + return fmt.Errorf("DMS Replication Task %s still exists", rs.Primary.ID) } return nil } } -func replicationTaskConfigBase(rName string) string { - return acctest.ConfigCompose( - acctest.ConfigAvailableAZsNoOptIn(), - fmt.Sprintf(` +func testAccReplicationTaskConfig_base(rName string) string { + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 2), fmt.Sprintf(` data "aws_partition" "current" {} - data "aws_region" "current" {} -resource "aws_vpc" "test" { - cidr_block = "10.1.0.0/16" - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test1" { - cidr_block = "10.1.1.0/24" - availability_zone = data.aws_availability_zones.available.names[0] - vpc_id = aws_vpc.test.id - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test2" { - cidr_block = "10.1.2.0/24" - availability_zone = data.aws_availability_zones.available.names[1] - vpc_id = aws_vpc.test.id - - tags = { - Name = 
"%[1]s-2" - } -} - resource "aws_dms_endpoint" "source" { database_name = %[1]q endpoint_id = "%[1]s-source" @@ -490,7 +455,7 @@ resource "aws_dms_endpoint" "target" { resource "aws_dms_replication_subnet_group" "test" { replication_subnet_group_id = %[1]q replication_subnet_group_description = "terraform test for replication subnet group" - subnet_ids = [aws_subnet.test1.id, aws_subnet.test2.id] + subnet_ids = aws_subnet.test[*].id } resource "aws_dms_replication_instance" "test" { @@ -506,9 +471,7 @@ resource "aws_dms_replication_instance" "test" { } func testAccReplicationTaskConfig_basic(rName, tags string) string { - return acctest.ConfigCompose( - replicationTaskConfigBase(rName), - fmt.Sprintf(` + return acctest.ConfigCompose(testAccReplicationTaskConfig_base(rName), fmt.Sprintf(` resource "aws_dms_replication_task" "test" { migration_type = "full-load" replication_instance_arn = aws_dms_replication_instance.test.replication_instance_arn @@ -528,9 +491,7 @@ resource "aws_dms_replication_task" "test" { } func testAccReplicationTaskConfig_update(rName, migType string, memLimitTotal int, ruleName string) string { - return acctest.ConfigCompose( - replicationTaskConfigBase(rName), - fmt.Sprintf(` + return acctest.ConfigCompose(testAccReplicationTaskConfig_base(rName), fmt.Sprintf(` resource "aws_dms_replication_task" "test" { migration_type = %[2]q replication_instance_arn = aws_dms_replication_instance.test.replication_instance_arn @@ -549,9 +510,7 @@ resource "aws_dms_replication_task" "test" { } func testAccReplicationTaskConfig_cdcStartPosition(rName, cdcStartPosition string) string { - return acctest.ConfigCompose( - replicationTaskConfigBase(rName), - fmt.Sprintf(` + return acctest.ConfigCompose(testAccReplicationTaskConfig_base(rName), fmt.Sprintf(` resource "aws_dms_replication_task" "test" { cdc_start_position = %[1]q migration_type = "cdc" @@ -566,173 +525,7 @@ resource "aws_dms_replication_task" "test" { } func testAccReplicationTaskConfig_start(rName 
string, startTask bool, ruleName string) string { - return acctest.ConfigCompose( - acctest.ConfigAvailableAZsNoOptIn(), - fmt.Sprintf(` -data "aws_partition" "current" {} - -data "aws_region" "current" {} - -resource "aws_vpc" "test" { - cidr_block = "10.1.0.0/16" - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test1" { - cidr_block = "10.1.1.0/24" - availability_zone = data.aws_availability_zones.available.names[0] - vpc_id = aws_vpc.test.id - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test2" { - cidr_block = "10.1.2.0/24" - availability_zone = data.aws_availability_zones.available.names[1] - vpc_id = aws_vpc.test.id - - tags = { - Name = "%[1]s-2" - } -} - -resource "aws_security_group" "test" { - vpc_id = aws_vpc.test.id - - ingress { - protocol = -1 - self = true - from_port = 0 - to_port = 0 - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_db_subnet_group" "test" { - name = %[1]q - subnet_ids = [aws_subnet.test1.id, aws_subnet.test2.id] - - tags = { - Name = %[1]q - } -} - -data "aws_rds_engine_version" "default" { - engine = "aurora-mysql" -} - -data "aws_rds_orderable_db_instance" "test" { - engine = data.aws_rds_engine_version.default.engine - engine_version = data.aws_rds_engine_version.default.version - preferred_instance_classes = ["db.t3.small", "db.t3.medium", "db.t3.large"] -} - -resource "aws_rds_cluster_parameter_group" "test" { - name = "%[1]s-pg-cluster" - family = data.aws_rds_engine_version.default.parameter_group_family - description = "DMS cluster parameter group" - - parameter { - name = "binlog_format" - value = "ROW" - apply_method = "pending-reboot" - } - - parameter { - name = "binlog_row_image" - value = "Full" - apply_method = "pending-reboot" - } - - parameter { - name = "binlog_checksum" - value = "NONE" - apply_method = "pending-reboot" - } -} - -resource "aws_rds_cluster" "source" { - cluster_identifier = "%[1]s-aurora-cluster-source" 
- engine = data.aws_rds_orderable_db_instance.test.engine - engine_version = data.aws_rds_orderable_db_instance.test.engine_version - database_name = "tftest" - master_username = "tftest" - master_password = "mustbeeightcharaters" - skip_final_snapshot = true - vpc_security_group_ids = [aws_security_group.test.id] - db_subnet_group_name = aws_db_subnet_group.test.name - db_cluster_parameter_group_name = aws_rds_cluster_parameter_group.test.name -} - -resource "aws_rds_cluster_instance" "source" { - identifier = "%[1]s-source-primary" - cluster_identifier = aws_rds_cluster.source.id - engine = data.aws_rds_orderable_db_instance.test.engine - engine_version = data.aws_rds_orderable_db_instance.test.engine_version - instance_class = data.aws_rds_orderable_db_instance.test.instance_class - db_subnet_group_name = aws_db_subnet_group.test.name -} - -resource "aws_rds_cluster" "target" { - cluster_identifier = "%[1]s-aurora-cluster-target" - engine = data.aws_rds_orderable_db_instance.test.engine - engine_version = data.aws_rds_orderable_db_instance.test.engine_version - database_name = "tftest" - master_username = "tftest" - master_password = "mustbeeightcharaters" - skip_final_snapshot = true - vpc_security_group_ids = [aws_security_group.test.id] - db_subnet_group_name = aws_db_subnet_group.test.name -} - -resource "aws_rds_cluster_instance" "target" { - identifier = "%[1]s-target-primary" - cluster_identifier = aws_rds_cluster.target.id - engine = data.aws_rds_orderable_db_instance.test.engine - engine_version = data.aws_rds_orderable_db_instance.test.engine_version - instance_class = data.aws_rds_orderable_db_instance.test.instance_class - db_subnet_group_name = aws_db_subnet_group.test.name -} - -resource "aws_dms_endpoint" "source" { - database_name = "tftest" - endpoint_id = "%[1]s-source" - endpoint_type = "source" - engine_name = "aurora" - server_name = aws_rds_cluster.source.endpoint - port = 3306 - username = "tftest" - password = "mustbeeightcharaters" -} - 
-resource "aws_dms_endpoint" "target" { - database_name = "tftest" - endpoint_id = "%[1]s-target" - endpoint_type = "target" - engine_name = "aurora" - server_name = aws_rds_cluster.target.endpoint - port = 3306 - username = "tftest" - password = "mustbeeightcharaters" -} - -resource "aws_dms_replication_subnet_group" "test" { - replication_subnet_group_id = %[1]q - replication_subnet_group_description = "terraform test for replication subnet group" - subnet_ids = [aws_subnet.test1.id, aws_subnet.test2.id] -} - + return acctest.ConfigCompose(testAccReplicationConfig_base(rName), fmt.Sprintf(` resource "aws_dms_replication_instance" "test" { allocated_storage = 5 auto_minor_version_upgrade = true @@ -766,39 +559,9 @@ resource "aws_dms_replication_task" "test" { } func testAccReplicationTaskConfig_s3ToRDS(rName string) string { - return acctest.ConfigCompose( - acctest.ConfigAvailableAZsNoOptIn(), - testAccS3EndpointConfig_base(rName), - fmt.Sprintf(` -resource "aws_vpc" "test" { - cidr_block = "10.1.0.0/16" - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test1" { - cidr_block = "10.1.1.0/24" - availability_zone = data.aws_availability_zones.available.names[0] - vpc_id = aws_vpc.test.id - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test2" { - cidr_block = "10.1.2.0/24" - availability_zone = data.aws_availability_zones.available.names[1] - vpc_id = aws_vpc.test.id - - tags = { - Name = "%[1]s-2" - } -} - + return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 2), testAccS3EndpointConfig_base(rName), fmt.Sprintf(` resource "aws_security_group" "test" { + name = %[1]q vpc_id = aws_vpc.test.id ingress { @@ -814,11 +577,15 @@ resource "aws_security_group" "test" { protocol = "-1" cidr_blocks = ["0.0.0.0/0"] } + + tags = { + Name = %[1]q + } } resource "aws_db_subnet_group" "test" { name = %[1]q - subnet_ids = [aws_subnet.test1.id, aws_subnet.test2.id] + subnet_ids = aws_subnet.test[*].id tags = { Name = %[1]q @@ -828,7 +595,7 @@ 
resource "aws_db_subnet_group" "test" { resource "aws_dms_replication_subnet_group" "test" { replication_subnet_group_id = %[1]q replication_subnet_group_description = "terraform test for replication subnet group" - subnet_ids = [aws_subnet.test1.id, aws_subnet.test2.id] + subnet_ids = aws_subnet.test[*].id } resource "aws_s3_bucket" "test" { @@ -949,9 +716,7 @@ resource "aws_dms_replication_task" "test" { } func testAccReplicationTaskConfig_cdcStartTime(rName, cdcStartPosition string) string { - return acctest.ConfigCompose( - replicationTaskConfigBase(rName), - fmt.Sprintf(` + return acctest.ConfigCompose(testAccReplicationTaskConfig_base(rName), fmt.Sprintf(` resource "aws_dms_replication_task" "test" { cdc_start_time = %[1]q migration_type = "cdc" @@ -966,67 +731,7 @@ resource "aws_dms_replication_task" "test" { } func testAccReplicationTaskConfig_move(rName, arn string) string { - return acctest.ConfigCompose( - acctest.ConfigAvailableAZsNoOptIn(), - fmt.Sprintf(` -data "aws_partition" "current" {} -data "aws_region" "current" {} -resource "aws_vpc" "test" { - cidr_block = "10.1.0.0/16" - tags = { - Name = %[1]q - } -} -resource "aws_subnet" "test1" { - cidr_block = "10.1.1.0/24" - availability_zone = data.aws_availability_zones.available.names[0] - vpc_id = aws_vpc.test.id - tags = { - Name = %[1]q - } -} -resource "aws_subnet" "test2" { - cidr_block = "10.1.2.0/24" - availability_zone = data.aws_availability_zones.available.names[1] - vpc_id = aws_vpc.test.id - tags = { - Name = "%[1]s-2" - } -} -resource "aws_dms_endpoint" "source" { - database_name = %[1]q - endpoint_id = "%[1]s-source" - endpoint_type = "source" - engine_name = "aurora" - server_name = "tf-test-cluster.cluster-xxxxxxx.${data.aws_region.current.name}.rds.${data.aws_partition.current.dns_suffix}" - port = 3306 - username = "tftest" - password = "tftest" -} -resource "aws_dms_endpoint" "target" { - database_name = %[1]q - endpoint_id = "%[1]s-target" - endpoint_type = "target" - engine_name 
= "aurora" - server_name = "tf-test-cluster.cluster-xxxxxxx.${data.aws_region.current.name}.rds.${data.aws_partition.current.dns_suffix}" - port = 3306 - username = "tftest" - password = "tftest" -} -resource "aws_dms_replication_subnet_group" "test" { - replication_subnet_group_id = %[1]q - replication_subnet_group_description = "terraform test for replication subnet group" - subnet_ids = [aws_subnet.test1.id, aws_subnet.test2.id] -} -resource "aws_dms_replication_instance" "test" { - allocated_storage = 5 - auto_minor_version_upgrade = true - replication_instance_class = "dms.c4.large" - replication_instance_id = %[1]q - preferred_maintenance_window = "sun:00:30-sun:02:30" - publicly_accessible = false - replication_subnet_group_id = aws_dms_replication_subnet_group.test.replication_subnet_group_id -} + return acctest.ConfigCompose(testAccReplicationTaskConfig_base(rName), fmt.Sprintf(` resource "aws_dms_replication_instance" "test2" { allocated_storage = 5 auto_minor_version_upgrade = true @@ -1036,5 +741,19 @@ resource "aws_dms_replication_instance" "test2" { publicly_accessible = false replication_subnet_group_id = aws_dms_replication_subnet_group.test.replication_subnet_group_id } -`, rName)) + +resource "aws_dms_replication_task" "test" { + migration_type = "full-load" + replication_instance_arn = %[2]s + replication_task_id = %[1]q + replication_task_settings = 
"{\"BeforeImageSettings\":null,\"FailTaskWhenCleanTaskResourceFailed\":false,\"ChangeProcessingDdlHandlingPolicy\":{\"HandleSourceTableAltered\":true,\"HandleSourceTableDropped\":true,\"HandleSourceTableTruncated\":true},\"ChangeProcessingTuning\":{\"BatchApplyMemoryLimit\":500,\"BatchApplyPreserveTransaction\":true,\"BatchApplyTimeoutMax\":30,\"BatchApplyTimeoutMin\":1,\"BatchSplitSize\":0,\"CommitTimeout\":1,\"MemoryKeepTime\":60,\"MemoryLimitTotal\":1024,\"MinTransactionSize\":1000,\"StatementCacheSize\":50},\"CharacterSetSettings\":null,\"ControlTablesSettings\":{\"ControlSchema\":\"\",\"FullLoadExceptionTableEnabled\":false,\"HistoryTableEnabled\":false,\"HistoryTimeslotInMinutes\":5,\"StatusTableEnabled\":false,\"SuspendedTablesTableEnabled\":false},\"ErrorBehavior\":{\"ApplyErrorDeletePolicy\":\"IGNORE_RECORD\",\"ApplyErrorEscalationCount\":0,\"ApplyErrorEscalationPolicy\":\"LOG_ERROR\",\"ApplyErrorFailOnTruncationDdl\":false,\"ApplyErrorInsertPolicy\":\"LOG_ERROR\",\"ApplyErrorUpdatePolicy\":\"LOG_ERROR\",\"DataErrorEscalationCount\":0,\"DataErrorEscalationPolicy\":\"SUSPEND_TABLE\",\"DataErrorPolicy\":\"LOG_ERROR\",\"DataTruncationErrorPolicy\":\"LOG_ERROR\",\"EventErrorPolicy\":\"IGNORE\",\"FailOnNoTablesCaptured\":false,\"FailOnTransactionConsistencyBreached\":false,\"FullLoadIgnoreConflicts\":true,\"RecoverableErrorCount\":-1,\"RecoverableErrorInterval\":5,\"RecoverableErrorStopRetryAfterThrottlingMax\":false,\"RecoverableErrorThrottling\":true,\"RecoverableErrorThrottlingMax\":1800,\"TableErrorEscalationCount\":0,\"TableErrorEscalationPolicy\":\"STOP_TASK\",\"TableErrorPolicy\":\"SUSPEND_TABLE\"},\"FullLoadSettings\":{\"CommitRate\":10000,\"CreatePkAfterFullLoad\":false,\"MaxFullLoadSubTasks\":8,\"StopTaskCachedChangesApplied\":false,\"StopTaskCachedChangesNotApplied\":false,\"TargetTablePrepMode\":\"DROP_AND_CREATE\",\"TransactionConsistencyTimeout\":600},\"Logging\":{\"EnableLogging\":false,\"LogComponents\":[{\"Id\":\"TRANSFORMATION\",\"Severity\":\"
LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_UNLOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"IO\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_LOAD\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"PERFORMANCE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SOURCE_CAPTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"SORTER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"REST_SERVER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"VALIDATOR_EXT\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TARGET_APPLY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TASK_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"TABLES_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"METADATA_MANAGER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_FACTORY\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMON\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"ADDONS\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"DATA_STRUCTURE\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"COMMUNICATION\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"},{\"Id\":\"FILE_TRANSFER\",\"Severity\":\"LOGGER_SEVERITY_DEFAULT\"}]},\"LoopbackPreventionSettings\":null,\"PostProcessingRules\":null,\"StreamBufferSettings\":{\"CtrlStreamBufferSizeInMB\":5,\"StreamBufferCount\":3,\"StreamBufferSizeInMB\":8},\"TargetMetadata\":{\"BatchApplyEnabled\":false,\"FullLobMode\":false,\"InlineLobMaxSize\":0,\"LimitedSizeLobMode\":true,\"LoadMaxFileSize\":0,\"LobChunkSize\":0,\"LobMaxSize\":32,\"ParallelApplyBufferSize\":0,\"ParallelApplyQueuesPerThread\":0,\"ParallelApplyThreads\":0,\"ParallelLoadBufferSize\":0,\"ParallelLoadQueuesPerThread\":0,\"ParallelLoadThreads\":0,\"SupportLobs\":true,\"TargetSchema\":\"\",\"TaskRecoveryTableEnabled\":false},\"TTSettings\":{\"EnableTT\":false,\"TTRecordSettings\":null,\"TTS3Settings\":null}}" + source_endpoint_arn = aws_dms_endpoint.source.endpoint_arn + table_mappings = 
"{\"rules\":[{\"rule-type\":\"selection\",\"rule-id\":\"1\",\"rule-name\":\"1\",\"object-locator\":{\"schema-name\":\"%%\",\"table-name\":\"%%\"},\"rule-action\":\"include\"}]}" + + tags = { + Name = %[1]q + } + + target_endpoint_arn = aws_dms_endpoint.target.endpoint_arn +`, rName, arn)) } From cca7ce7107d5434e3372e23cb58e0e317c7fe30d Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 16:28:09 -0500 Subject: [PATCH 314/438] Fix semgrep 'dgryski.semgrep-go.oddifsequence.odd-sequence-ifs'. --- internal/service/dms/replication_task.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/internal/service/dms/replication_task.go b/internal/service/dms/replication_task.go index ac30f9124f1..fb688f6ff65 100644 --- a/internal/service/dms/replication_task.go +++ b/internal/service/dms/replication_task.go @@ -371,10 +371,6 @@ func findReplicationTasks(ctx context.Context, conn *dms.DatabaseMigrationServic return nil, err } - if err != nil { - return nil, err - } - return output, nil } From 3527c4411f1d7207b402e4fe7e2908702da6680a Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 16:33:17 -0500 Subject: [PATCH 315/438] Fix terrafmt error. --- internal/service/dms/replication_task_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/service/dms/replication_task_test.go b/internal/service/dms/replication_task_test.go index d40ca48f347..00a9aa6764c 100644 --- a/internal/service/dms/replication_task_test.go +++ b/internal/service/dms/replication_task_test.go @@ -755,5 +755,6 @@ resource "aws_dms_replication_task" "test" { } target_endpoint_arn = aws_dms_endpoint.target.endpoint_arn +} `, rName, arn)) } From 3f8ceb98420c5c6b46bd8043c12476288d7946f4 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 16:37:46 -0500 Subject: [PATCH 316/438] s3: Use AWS SDK for Go v2 exclusively. 
--- internal/conns/awsclient_gen.go | 5 --- internal/service/s3/service_package.go | 55 ++++++-------------------- names/names_data.csv | 2 +- 3 files changed, 13 insertions(+), 49 deletions(-) diff --git a/internal/conns/awsclient_gen.go b/internal/conns/awsclient_gen.go index 265415a822b..2a7f45c133c 100644 --- a/internal/conns/awsclient_gen.go +++ b/internal/conns/awsclient_gen.go @@ -207,7 +207,6 @@ import ( route53recoverycontrolconfig_sdkv1 "github.com/aws/aws-sdk-go/service/route53recoverycontrolconfig" route53recoveryreadiness_sdkv1 "github.com/aws/aws-sdk-go/service/route53recoveryreadiness" route53resolver_sdkv1 "github.com/aws/aws-sdk-go/service/route53resolver" - s3_sdkv1 "github.com/aws/aws-sdk-go/service/s3" s3outposts_sdkv1 "github.com/aws/aws-sdk-go/service/s3outposts" sagemaker_sdkv1 "github.com/aws/aws-sdk-go/service/sagemaker" schemas_sdkv1 "github.com/aws/aws-sdk-go/service/schemas" @@ -956,10 +955,6 @@ func (c *AWSClient) Route53ResolverConn(ctx context.Context) *route53resolver_sd return errs.Must(conn[*route53resolver_sdkv1.Route53Resolver](ctx, c, names.Route53Resolver, make(map[string]any))) } -func (c *AWSClient) S3Conn(ctx context.Context) *s3_sdkv1.S3 { - return errs.Must(conn[*s3_sdkv1.S3](ctx, c, names.S3, make(map[string]any))) -} - func (c *AWSClient) S3Client(ctx context.Context) *s3_sdkv2.Client { return errs.Must(client[*s3_sdkv2.Client](ctx, c, names.S3, make(map[string]any))) } diff --git a/internal/service/s3/service_package.go b/internal/service/s3/service_package.go index dc1ec5a6408..a56ba5a7750 100644 --- a/internal/service/s3/service_package.go +++ b/internal/service/s3/service_package.go @@ -6,53 +6,22 @@ package s3 import ( "context" - aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" - retry_sdkv2 "github.com/aws/aws-sdk-go-v2/aws/retry" - s3_sdkv2 "github.com/aws/aws-sdk-go-v2/service/s3" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" + 
"github.com/aws/aws-sdk-go-v2/service/s3" endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" - request_sdkv1 "github.com/aws/aws-sdk-go/aws/request" - session_sdkv1 "github.com/aws/aws-sdk-go/aws/session" - s3_sdkv1 "github.com/aws/aws-sdk-go/service/s3" - tfawserr_sdkv1 "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" - tfawserr_sdkv2 "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" + "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/names" ) -// NewConn returns a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) NewConn(ctx context.Context, m map[string]any) (*s3_sdkv1.S3, error) { - sess := m["session"].(*session_sdkv1.Session) - config := &aws_sdkv1.Config{ - Endpoint: aws_sdkv1.String(m["endpoint"].(string)), - S3ForcePathStyle: aws_sdkv1.Bool(m["s3_use_path_style"].(bool)), - } - - if v, ok := m["s3_us_east_1_regional_endpoint"]; ok { - config.S3UsEast1RegionalEndpoint = v.(endpoints_sdkv1.S3UsEast1RegionalEndpoint) - } - - return s3_sdkv1.New(sess.Copy(config)), nil -} - -// CustomizeConn customizes a new AWS SDK for Go v1 client for this service package's AWS API. -func (p *servicePackage) CustomizeConn(ctx context.Context, conn *s3_sdkv1.S3) (*s3_sdkv1.S3, error) { - conn.Handlers.Retry.PushBack(func(r *request_sdkv1.Request) { - if tfawserr_sdkv1.ErrMessageContains(r.Error, errCodeOperationAborted, "A conflicting conditional operation is currently in progress against this resource. Please try again.") { - r.Retryable = aws_sdkv1.Bool(true) - } - }) - - return conn, nil -} - // NewClient returns a new AWS SDK for Go v2 client for this service package's AWS API. 
-func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*s3_sdkv2.Client, error) { - cfg := *(config["aws_sdkv2_config"].(*aws_sdkv2.Config)) +func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) (*s3.Client, error) { + cfg := *(config["aws_sdkv2_config"].(*aws.Config)) - return s3_sdkv2.NewFromConfig(cfg, func(o *s3_sdkv2.Options) { + return s3.NewFromConfig(cfg, func(o *s3.Options) { if endpoint := config["endpoint"].(string); endpoint != "" { - o.BaseEndpoint = aws_sdkv2.String(endpoint) + o.BaseEndpoint = aws.String(endpoint) } else if o.Region == names.USEast1RegionID && config["s3_us_east_1_regional_endpoint"].(endpoints_sdkv1.S3UsEast1RegionalEndpoint) != endpoints_sdkv1.RegionalS3UsEast1Endpoint { // Maintain the AWS SDK for Go v1 default of using the global endpoint in us-east-1. // See https://github.com/hashicorp/terraform-provider-aws/issues/33028. @@ -60,11 +29,11 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( } o.UsePathStyle = config["s3_use_path_style"].(bool) - o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws_sdkv2.RetryerV2), retry_sdkv2.IsErrorRetryableFunc(func(err error) aws_sdkv2.Ternary { - if tfawserr_sdkv2.ErrMessageContains(err, errCodeOperationAborted, "A conflicting conditional operation is currently in progress against this resource. Please try again.") { - return aws_sdkv2.TrueTernary + o.Retryer = conns.AddIsErrorRetryables(cfg.Retryer().(aws.RetryerV2), retry.IsErrorRetryableFunc(func(err error) aws.Ternary { + if tfawserr.ErrMessageContains(err, errCodeOperationAborted, "A conflicting conditional operation is currently in progress against this resource. Please try again.") { + return aws.TrueTernary } - return aws_sdkv2.UnknownTernary // Delegate to configured Retryer. + return aws.UnknownTernary // Delegate to configured Retryer. 
})) }), nil } diff --git a/names/names_data.csv b/names/names_data.csv index 1db2a8f79ec..c0d7ce84d0d 100644 --- a/names/names_data.csv +++ b/names/names_data.csv @@ -306,7 +306,7 @@ route53-recovery-cluster,route53recoverycluster,route53recoverycluster,route53re route53-recovery-control-config,route53recoverycontrolconfig,route53recoverycontrolconfig,route53recoverycontrolconfig,,route53recoverycontrolconfig,,,Route53RecoveryControlConfig,Route53RecoveryControlConfig,x,1,,,aws_route53recoverycontrolconfig_,,route53recoverycontrolconfig_,Route 53 Recovery Control Config,Amazon,,,,,,, route53-recovery-readiness,route53recoveryreadiness,route53recoveryreadiness,route53recoveryreadiness,,route53recoveryreadiness,,,Route53RecoveryReadiness,Route53RecoveryReadiness,x,1,,,aws_route53recoveryreadiness_,,route53recoveryreadiness_,Route 53 Recovery Readiness,Amazon,,,,,,, route53resolver,route53resolver,route53resolver,route53resolver,,route53resolver,,,Route53Resolver,Route53Resolver,,1,,aws_route53_resolver_,aws_route53resolver_,,route53_resolver_,Route 53 Resolver,Amazon,,,,,,, -s3api,s3api,s3,s3,,s3,,s3api,S3,S3,x,1,2,aws_(canonical_user_id|s3_bucket|s3_object|s3_directory_bucket),aws_s3_,,s3_bucket;s3_directory_bucket;s3_object;canonical_user_id,S3 (Simple Storage),Amazon,,,,,AWS_S3_ENDPOINT,TF_AWS_S3_ENDPOINT, +s3api,s3api,s3,s3,,s3,,s3api,S3,S3,x,,2,aws_(canonical_user_id|s3_bucket|s3_object|s3_directory_bucket),aws_s3_,,s3_bucket;s3_directory_bucket;s3_object;canonical_user_id,S3 (Simple Storage),Amazon,,,,,AWS_S3_ENDPOINT,TF_AWS_S3_ENDPOINT, s3control,s3control,s3control,s3control,,s3control,,,S3Control,S3Control,,,2,aws_(s3_account_|s3control_|s3_access_),aws_s3control_,,s3control;s3_account_;s3_access_,S3 Control,Amazon,,,,,,, glacier,glacier,glacier,glacier,,glacier,,,Glacier,Glacier,,,2,,aws_glacier_,,glacier_,S3 Glacier,Amazon,,,,,,, s3outposts,s3outposts,s3outposts,s3outposts,,s3outposts,,,S3Outposts,S3Outposts,,1,,,aws_s3outposts_,,s3outposts_,S3 on 
Outposts,Amazon,,,,,,, From b61de3086baa01ba0f74af48cb94bdf4f18d1fc2 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 16:45:52 -0500 Subject: [PATCH 317/438] s3: Generate only AWS SDK for Go v2 tagging code. --- internal/service/s3/generate.go | 3 +- internal/service/s3/tags.go | 74 +++++++++++++++---------------- internal/service/s3/tags_gen.go | 26 +++++------ internal/service/s3/tagsv2_gen.go | 59 ------------------------ 4 files changed, 49 insertions(+), 113 deletions(-) delete mode 100644 internal/service/s3/tagsv2_gen.go diff --git a/internal/service/s3/generate.go b/internal/service/s3/generate.go index bb62eb52883..56c0891e267 100644 --- a/internal/service/s3/generate.go +++ b/internal/service/s3/generate.go @@ -1,8 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -ServiceTagsSlice -//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -SkipAWSServiceImp -ServiceTagsSlice -TagsFunc=tagsV2 -KeyValueTagsFunc=keyValueTagsV2 -GetTagsInFunc=getTagsInV2 -SetTagsOutFunc=setTagsOutV2 -- tagsv2_gen.go +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -SkipAWSServiceImp -ServiceTagsSlice -TagsFunc=tags -KeyValueTagsFunc=keyValueTags -GetTagsInFunc=getTagsIn -SetTagsOutFunc=setTagsOut //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. 
diff --git a/internal/service/s3/tags.go b/internal/service/s3/tags.go index 8707d4a3ecf..f2f14433b71 100644 --- a/internal/service/s3/tags.go +++ b/internal/service/s3/tags.go @@ -10,14 +10,10 @@ import ( "context" "fmt" - aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" - s3_sdkv2 "github.com/aws/aws-sdk-go-v2/service/s3" - s3types_sdkv2 "github.com/aws/aws-sdk-go-v2/service/s3/types" - aws_sdkv1 "github.com/aws/aws-sdk-go/aws" - s3_sdkv1 "github.com/aws/aws-sdk-go/service/s3" - s3iface_sdkv1 "github.com/aws/aws-sdk-go/service/s3/s3iface" - tfawserr_sdkv1 "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" - tfawserr_sdkv2 "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" ) @@ -25,17 +21,17 @@ import ( // BucketListTags lists S3 bucket tags. // The identifier is the bucket name. -func BucketListTags(ctx context.Context, conn s3iface_sdkv1.S3API, identifier string) (tftags.KeyValueTags, error) { - input := &s3_sdkv1.GetBucketTaggingInput{ - Bucket: aws_sdkv1.String(identifier), +func BucketListTags(ctx context.Context, conn *s3.Client, identifier string, optFns ...func(*s3.Options)) (tftags.KeyValueTags, error) { + input := &s3.GetBucketTaggingInput{ + Bucket: aws.String(identifier), } - output, err := conn.GetBucketTaggingWithContext(ctx, input) + output, err := conn.GetBucketTagging(ctx, input, optFns...) // S3 API Reference (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html) // lists the special error as NoSuchTagSetError, however the existing logic used NoSuchTagSet // and the AWS Go SDK has neither as a constant. 
- if tfawserr_sdkv1.ErrCodeEquals(err, errCodeNoSuchTagSet, errCodeNoSuchTagSetError) { + if tfawserr.ErrCodeEquals(err, errCodeNoSuchTagSet, errCodeNoSuchTagSetError) { return tftags.New(ctx, nil), nil } @@ -43,17 +39,17 @@ func BucketListTags(ctx context.Context, conn s3iface_sdkv1.S3API, identifier st return tftags.New(ctx, nil), err } - return KeyValueTags(ctx, output.TagSet), nil + return keyValueTags(ctx, output.TagSet), nil } // BucketUpdateTags updates S3 bucket tags. // The identifier is the bucket name. -func BucketUpdateTags(ctx context.Context, conn s3iface_sdkv1.S3API, identifier string, oldTagsMap, newTagsMap any) error { +func BucketUpdateTags(ctx context.Context, conn *s3.Client, identifier string, oldTagsMap, newTagsMap any, optFns ...func(*s3.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) // We need to also consider any existing ignored tags. - allTags, err := BucketListTags(ctx, conn, identifier) + allTags, err := BucketListTags(ctx, conn, identifier, optFns...) if err != nil { return fmt.Errorf("listing resource tags (%s): %w", identifier, err) @@ -62,24 +58,24 @@ func BucketUpdateTags(ctx context.Context, conn s3iface_sdkv1.S3API, identifier ignoredTags := allTags.Ignore(oldTags).Ignore(newTags) if len(newTags)+len(ignoredTags) > 0 { - input := &s3_sdkv1.PutBucketTaggingInput{ - Bucket: aws_sdkv1.String(identifier), - Tagging: &s3_sdkv1.Tagging{ - TagSet: Tags(newTags.Merge(ignoredTags)), + input := &s3.PutBucketTaggingInput{ + Bucket: aws.String(identifier), + Tagging: &types.Tagging{ + TagSet: tags(newTags.Merge(ignoredTags)), }, } - _, err := conn.PutBucketTaggingWithContext(ctx, input) + _, err := conn.PutBucketTagging(ctx, input, optFns...) 
if err != nil { return fmt.Errorf("setting resource tags (%s): %w", identifier, err) } } else if len(oldTags) > 0 && len(ignoredTags) == 0 { - input := &s3_sdkv1.DeleteBucketTaggingInput{ - Bucket: aws_sdkv1.String(identifier), + input := &s3.DeleteBucketTaggingInput{ + Bucket: aws.String(identifier), } - _, err := conn.DeleteBucketTaggingWithContext(ctx, input) + _, err := conn.DeleteBucketTagging(ctx, input, optFns...) if err != nil { return fmt.Errorf("deleting resource tags (%s): %w", identifier, err) @@ -90,15 +86,15 @@ func BucketUpdateTags(ctx context.Context, conn s3iface_sdkv1.S3API, identifier } // ObjectListTags lists S3 object tags. -func ObjectListTags(ctx context.Context, conn *s3_sdkv2.Client, bucket, key string, optFns ...func(*s3_sdkv2.Options)) (tftags.KeyValueTags, error) { - input := &s3_sdkv2.GetObjectTaggingInput{ - Bucket: aws_sdkv2.String(bucket), - Key: aws_sdkv2.String(key), +func ObjectListTags(ctx context.Context, conn *s3.Client, bucket, key string, optFns ...func(*s3.Options)) (tftags.KeyValueTags, error) { + input := &s3.GetObjectTaggingInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), } output, err := conn.GetObjectTagging(ctx, input, optFns...) - if tfawserr_sdkv2.ErrCodeEquals(err, errCodeNoSuchTagSet, errCodeNoSuchTagSetError) { + if tfawserr.ErrCodeEquals(err, errCodeNoSuchTagSet, errCodeNoSuchTagSetError) { return tftags.New(ctx, nil), nil } @@ -106,11 +102,11 @@ func ObjectListTags(ctx context.Context, conn *s3_sdkv2.Client, bucket, key stri return tftags.New(ctx, nil), err } - return keyValueTagsV2(ctx, output.TagSet), nil + return keyValueTags(ctx, output.TagSet), nil } // ObjectUpdateTags updates S3 object tags. 
-func ObjectUpdateTags(ctx context.Context, conn *s3_sdkv2.Client, bucket, key string, oldTagsMap, newTagsMap any, optFns ...func(*s3_sdkv2.Options)) error { +func ObjectUpdateTags(ctx context.Context, conn *s3.Client, bucket, key string, oldTagsMap, newTagsMap any, optFns ...func(*s3.Options)) error { oldTags := tftags.New(ctx, oldTagsMap) newTags := tftags.New(ctx, newTagsMap) @@ -124,11 +120,11 @@ func ObjectUpdateTags(ctx context.Context, conn *s3_sdkv2.Client, bucket, key st ignoredTags := allTags.Ignore(oldTags).Ignore(newTags) if len(newTags)+len(ignoredTags) > 0 { - input := &s3_sdkv2.PutObjectTaggingInput{ - Bucket: aws_sdkv2.String(bucket), - Key: aws_sdkv2.String(key), - Tagging: &s3types_sdkv2.Tagging{ - TagSet: tagsV2(newTags.Merge(ignoredTags)), + input := &s3.PutObjectTaggingInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), + Tagging: &types.Tagging{ + TagSet: tags(newTags.Merge(ignoredTags)), }, } @@ -138,9 +134,9 @@ func ObjectUpdateTags(ctx context.Context, conn *s3_sdkv2.Client, bucket, key st return fmt.Errorf("setting resource tags (%s/%s): %w", bucket, key, err) } } else if len(oldTags) > 0 && len(ignoredTags) == 0 { - input := &s3_sdkv2.DeleteObjectTaggingInput{ - Bucket: aws_sdkv2.String(bucket), - Key: aws_sdkv2.String(key), + input := &s3.DeleteObjectTaggingInput{ + Bucket: aws.String(bucket), + Key: aws.String(key), } _, err := conn.DeleteObjectTagging(ctx, input, optFns...) 
diff --git a/internal/service/s3/tags_gen.go b/internal/service/s3/tags_gen.go index 4b1b0a5930f..75b9221aa62 100644 --- a/internal/service/s3/tags_gen.go +++ b/internal/service/s3/tags_gen.go @@ -4,20 +4,20 @@ package s3 import ( "context" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go-v2/aws" + awstypes "github.com/aws/aws-sdk-go-v2/service/s3/types" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/types" ) // []*SERVICE.Tag handling -// Tags returns s3 service tags. -func Tags(tags tftags.KeyValueTags) []*s3.Tag { - result := make([]*s3.Tag, 0, len(tags)) +// tags returns s3 service tags. +func tags(tags tftags.KeyValueTags) []awstypes.Tag { + result := make([]awstypes.Tag, 0, len(tags)) for k, v := range tags.Map() { - tag := &s3.Tag{ + tag := awstypes.Tag{ Key: aws.String(k), Value: aws.String(v), } @@ -28,12 +28,12 @@ func Tags(tags tftags.KeyValueTags) []*s3.Tag { return result } -// KeyValueTags creates tftags.KeyValueTags from s3 service tags. -func KeyValueTags(ctx context.Context, tags []*s3.Tag) tftags.KeyValueTags { +// keyValueTags creates tftags.KeyValueTags from s3 service tags. +func keyValueTags(ctx context.Context, tags []awstypes.Tag) tftags.KeyValueTags { m := make(map[string]*string, len(tags)) for _, tag := range tags { - m[aws.StringValue(tag.Key)] = tag.Value + m[aws.ToString(tag.Key)] = tag.Value } return tftags.New(ctx, m) @@ -41,9 +41,9 @@ func KeyValueTags(ctx context.Context, tags []*s3.Tag) tftags.KeyValueTags { // getTagsIn returns s3 service tags from Context. // nil is returned if there are no input tags. 
-func getTagsIn(ctx context.Context) []*s3.Tag { +func getTagsIn(ctx context.Context) []awstypes.Tag { if inContext, ok := tftags.FromContext(ctx); ok { - if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { + if tags := tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags } } @@ -52,8 +52,8 @@ func getTagsIn(ctx context.Context) []*s3.Tag { } // setTagsOut sets s3 service tags in Context. -func setTagsOut(ctx context.Context, tags []*s3.Tag) { +func setTagsOut(ctx context.Context, tags []awstypes.Tag) { if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(KeyValueTags(ctx, tags)) + inContext.TagsOut = types.Some(keyValueTags(ctx, tags)) } } diff --git a/internal/service/s3/tagsv2_gen.go b/internal/service/s3/tagsv2_gen.go deleted file mode 100644 index 10215b864dd..00000000000 --- a/internal/service/s3/tagsv2_gen.go +++ /dev/null @@ -1,59 +0,0 @@ -// Code generated by internal/generate/tags/main.go; DO NOT EDIT. -package s3 - -import ( - "context" - - "github.com/aws/aws-sdk-go-v2/aws" - awstypes "github.com/aws/aws-sdk-go-v2/service/s3/types" - tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" - "github.com/hashicorp/terraform-provider-aws/internal/types" -) - -// []*SERVICE.Tag handling - -// tagsV2 returns s3 service tags. -func tagsV2(tags tftags.KeyValueTags) []awstypes.Tag { - result := make([]awstypes.Tag, 0, len(tags)) - - for k, v := range tags.Map() { - tag := awstypes.Tag{ - Key: aws.String(k), - Value: aws.String(v), - } - - result = append(result, tag) - } - - return result -} - -// keyValueTagsV2 creates tftags.KeyValueTags from s3 service tags. -func keyValueTagsV2(ctx context.Context, tags []awstypes.Tag) tftags.KeyValueTags { - m := make(map[string]*string, len(tags)) - - for _, tag := range tags { - m[aws.ToString(tag.Key)] = tag.Value - } - - return tftags.New(ctx, m) -} - -// getTagsInV2 returns s3 service tags from Context. 
-// nil is returned if there are no input tags. -func getTagsInV2(ctx context.Context) []awstypes.Tag { - if inContext, ok := tftags.FromContext(ctx); ok { - if tags := tagsV2(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { - return tags - } - } - - return nil -} - -// setTagsOutV2 sets s3 service tags in Context. -func setTagsOutV2(ctx context.Context, tags []awstypes.Tag) { - if inContext, ok := tftags.FromContext(ctx); ok { - inContext.TagsOut = types.Some(keyValueTagsV2(ctx, tags)) - } -} From fc7fd4399e14ca2272415978d8c861d56e72afc6 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Mon, 18 Dec 2023 16:57:27 -0500 Subject: [PATCH 318/438] s3: Tweak AWS SDK for Go v2 tagging code. --- internal/service/s3/bucket_analytics_configuration.go | 6 +++--- .../service/s3/bucket_intelligent_tiering_configuration.go | 6 +++--- internal/service/s3/bucket_lifecycle_configuration.go | 4 ++-- internal/service/s3/bucket_metric.go | 6 +++--- internal/service/s3/bucket_replication_configuration.go | 4 ++-- internal/service/s3/generate.go | 2 +- internal/service/s3/tags.go | 4 ++-- internal/service/s3/tags_gen.go | 6 +++--- 8 files changed, 19 insertions(+), 19 deletions(-) diff --git a/internal/service/s3/bucket_analytics_configuration.go b/internal/service/s3/bucket_analytics_configuration.go index ea13d406a7c..7c79e50d43f 100644 --- a/internal/service/s3/bucket_analytics_configuration.go +++ b/internal/service/s3/bucket_analytics_configuration.go @@ -266,7 +266,7 @@ func expandAnalyticsFilter(ctx context.Context, m map[string]interface{}) types. 
var tags []types.Tag if v, ok := m["tags"]; ok { - tags = tagsV2(tftags.New(ctx, v).IgnoreAWS()) + tags = Tags(tftags.New(ctx, v).IgnoreAWS()) } if prefix == "" && len(tags) == 0 { @@ -365,7 +365,7 @@ func flattenAnalyticsFilter(ctx context.Context, analyticsFilter types.Analytics result["prefix"] = aws.ToString(v) } if v := v.Value.Tags; v != nil { - result["tags"] = keyValueTagsV2(ctx, v).IgnoreAWS().Map() + result["tags"] = keyValueTags(ctx, v).IgnoreAWS().Map() } case *types.AnalyticsFilterMemberPrefix: result["prefix"] = v.Value @@ -373,7 +373,7 @@ func flattenAnalyticsFilter(ctx context.Context, analyticsFilter types.Analytics tags := []types.Tag{ v.Value, } - result["tags"] = keyValueTagsV2(ctx, tags).IgnoreAWS().Map() + result["tags"] = keyValueTags(ctx, tags).IgnoreAWS().Map() default: return nil } diff --git a/internal/service/s3/bucket_intelligent_tiering_configuration.go b/internal/service/s3/bucket_intelligent_tiering_configuration.go index ac19a77b5cb..ea9275eefba 100644 --- a/internal/service/s3/bucket_intelligent_tiering_configuration.go +++ b/internal/service/s3/bucket_intelligent_tiering_configuration.go @@ -277,7 +277,7 @@ func expandIntelligentTieringFilter(ctx context.Context, tfMap map[string]interf var tags []types.Tag if v, ok := tfMap["tags"].(map[string]interface{}); ok { - tags = tagsV2(tftags.New(ctx, v)) + tags = Tags(tftags.New(ctx, v)) } apiObject := &types.IntelligentTieringFilter{} @@ -365,7 +365,7 @@ func flattenIntelligentTieringFilter(ctx context.Context, apiObject *types.Intel } if v := apiObject.Tag; v != nil { - tfMap["tags"] = keyValueTagsV2(ctx, []types.Tag{*v}).Map() + tfMap["tags"] = keyValueTags(ctx, []types.Tag{*v}).Map() } } else { apiObject := apiObject.And @@ -375,7 +375,7 @@ func flattenIntelligentTieringFilter(ctx context.Context, apiObject *types.Intel } if v := apiObject.Tags; v != nil { - tfMap["tags"] = keyValueTagsV2(ctx, v).Map() + tfMap["tags"] = keyValueTags(ctx, v).Map() } } diff --git 
a/internal/service/s3/bucket_lifecycle_configuration.go b/internal/service/s3/bucket_lifecycle_configuration.go index 10018f1340c..3425d0e5e74 100644 --- a/internal/service/s3/bucket_lifecycle_configuration.go +++ b/internal/service/s3/bucket_lifecycle_configuration.go @@ -718,7 +718,7 @@ func expandLifecycleRuleFilterMemberAnd(ctx context.Context, m map[string]interf } if v, ok := m["tags"].(map[string]interface{}); ok && len(v) > 0 { - tags := tagsV2(tftags.New(ctx, v).IgnoreAWS()) + tags := Tags(tftags.New(ctx, v).IgnoreAWS()) if len(tags) > 0 { result.Value.Tags = tags } @@ -963,7 +963,7 @@ func flattenLifecycleRuleFilterMemberAnd(ctx context.Context, andOp *types.Lifec } if v := andOp.Value.Tags; v != nil { - m["tags"] = keyValueTagsV2(ctx, v).IgnoreAWS().Map() + m["tags"] = keyValueTags(ctx, v).IgnoreAWS().Map() } return []interface{}{m} diff --git a/internal/service/s3/bucket_metric.go b/internal/service/s3/bucket_metric.go index 2263fca3bfd..69f37d94ce4 100644 --- a/internal/service/s3/bucket_metric.go +++ b/internal/service/s3/bucket_metric.go @@ -194,7 +194,7 @@ func expandMetricsFilter(ctx context.Context, m map[string]interface{}) types.Me var tags []types.Tag if v, ok := m["tags"]; ok { - tags = tagsV2(tftags.New(ctx, v).IgnoreAWS()) + tags = Tags(tftags.New(ctx, v).IgnoreAWS()) } var metricsFilter types.MetricsFilter @@ -233,7 +233,7 @@ func flattenMetricsFilter(ctx context.Context, metricsFilter types.MetricsFilter m["prefix"] = aws.ToString(v) } if v := v.Value.Tags; v != nil { - m["tags"] = keyValueTagsV2(ctx, v).IgnoreAWS().Map() + m["tags"] = keyValueTags(ctx, v).IgnoreAWS().Map() } case *types.MetricsFilterMemberPrefix: m["prefix"] = v.Value @@ -241,7 +241,7 @@ func flattenMetricsFilter(ctx context.Context, metricsFilter types.MetricsFilter tags := []types.Tag{ v.Value, } - m["tags"] = keyValueTagsV2(ctx, tags).IgnoreAWS().Map() + m["tags"] = keyValueTags(ctx, tags).IgnoreAWS().Map() default: return nil } diff --git 
a/internal/service/s3/bucket_replication_configuration.go b/internal/service/s3/bucket_replication_configuration.go index 577eb1d9945..24dd0564961 100644 --- a/internal/service/s3/bucket_replication_configuration.go +++ b/internal/service/s3/bucket_replication_configuration.go @@ -828,7 +828,7 @@ func expandReplicationRuleFilterMemberAnd(ctx context.Context, l []interface{}) } if v, ok := tfMap["tags"].(map[string]interface{}); ok && len(v) > 0 { - tags := tagsV2(tftags.New(ctx, v).IgnoreAWS()) + tags := Tags(tftags.New(ctx, v).IgnoreAWS()) if len(tags) > 0 { result.Value.Tags = tags } @@ -1073,7 +1073,7 @@ func flattenReplicationRuleFilterMemberAnd(ctx context.Context, op *types.Replic } if v := op.Value.Tags; v != nil { - m["tags"] = keyValueTagsV2(ctx, v).IgnoreAWS().Map() + m["tags"] = keyValueTags(ctx, v).IgnoreAWS().Map() } return []interface{}{m} diff --git a/internal/service/s3/generate.go b/internal/service/s3/generate.go index 56c0891e267..2bab3cad22c 100644 --- a/internal/service/s3/generate.go +++ b/internal/service/s3/generate.go @@ -1,7 +1,7 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: MPL-2.0 -//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -SkipAWSServiceImp -ServiceTagsSlice -TagsFunc=tags -KeyValueTagsFunc=keyValueTags -GetTagsInFunc=getTagsIn -SetTagsOutFunc=setTagsOut +//go:generate go run ../../generate/tags/main.go -AWSSDKVersion=2 -SkipAWSServiceImp -ServiceTagsSlice -TagsFunc=Tags -KeyValueTagsFunc=keyValueTags -GetTagsInFunc=getTagsIn -SetTagsOutFunc=setTagsOut //go:generate go run ../../generate/servicepackage/main.go // ONLY generate directives and package declaration! Do not add anything else to this file. 
diff --git a/internal/service/s3/tags.go b/internal/service/s3/tags.go index f2f14433b71..ba56f1f2f18 100644 --- a/internal/service/s3/tags.go +++ b/internal/service/s3/tags.go @@ -61,7 +61,7 @@ func BucketUpdateTags(ctx context.Context, conn *s3.Client, identifier string, o input := &s3.PutBucketTaggingInput{ Bucket: aws.String(identifier), Tagging: &types.Tagging{ - TagSet: tags(newTags.Merge(ignoredTags)), + TagSet: Tags(newTags.Merge(ignoredTags)), }, } @@ -124,7 +124,7 @@ func ObjectUpdateTags(ctx context.Context, conn *s3.Client, bucket, key string, Bucket: aws.String(bucket), Key: aws.String(key), Tagging: &types.Tagging{ - TagSet: tags(newTags.Merge(ignoredTags)), + TagSet: Tags(newTags.Merge(ignoredTags)), }, } diff --git a/internal/service/s3/tags_gen.go b/internal/service/s3/tags_gen.go index 75b9221aa62..60d30f43570 100644 --- a/internal/service/s3/tags_gen.go +++ b/internal/service/s3/tags_gen.go @@ -12,8 +12,8 @@ import ( // []*SERVICE.Tag handling -// tags returns s3 service tags. -func tags(tags tftags.KeyValueTags) []awstypes.Tag { +// Tags returns s3 service tags. +func Tags(tags tftags.KeyValueTags) []awstypes.Tag { result := make([]awstypes.Tag, 0, len(tags)) for k, v := range tags.Map() { @@ -43,7 +43,7 @@ func keyValueTags(ctx context.Context, tags []awstypes.Tag) tftags.KeyValueTags // nil is returned if there are no input tags. 
func getTagsIn(ctx context.Context) []awstypes.Tag { if inContext, ok := tftags.FromContext(ctx); ok { - if tags := tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { + if tags := Tags(inContext.TagsIn.UnwrapOrDefault()); len(tags) > 0 { return tags } } From f39bca27b5d92086f9e55461a4e90c2e1e61bb7c Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 19 Dec 2023 08:22:19 -0500 Subject: [PATCH 319/438] Update bucket.go --- internal/service/s3/bucket.go | 1 - 1 file changed, 1 deletion(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 7a238751c7a..17a7cbf56e3 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -31,7 +31,6 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" - "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" "github.com/hashicorp/terraform-provider-aws/internal/errs" From 9592b8185089b96ddf600412ae507455340f18d4 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 19 Dec 2023 08:29:04 -0500 Subject: [PATCH 320/438] Add CHANGELOG entry. --- .changelog/34890.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/34890.txt diff --git a/.changelog/34890.txt b/.changelog/34890.txt new file mode 100644 index 00000000000..9a474e1a470 --- /dev/null +++ b/.changelog/34890.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_s3_bucket: Modify server-side encryption configuration error handling, enabling support for NetApp StorageGRID +``` \ No newline at end of file From e67a0ff50fcae08e0a952e3955d191365cb8ce3d Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 19 Dec 2023 09:23:10 -0500 Subject: [PATCH 321/438] r/aws_s3_bucket: Migrate acceptance tests to AWS SDK for Go v2. 
--- internal/service/s3/bucket_test.go | 253 ++++++++++++++-------------- internal/service/s3/errors.go | 6 +- internal/service/s3/exports_test.go | 2 + 3 files changed, 131 insertions(+), 130 deletions(-) diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index bddb085ee2c..72553200794 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -13,13 +13,12 @@ import ( "time" "github.com/YakDriver/regexache" - aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" - s3_sdkv2 "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/cloudformation" "github.com/aws/aws-sdk-go/service/cloudfront" - "github.com/aws/aws-sdk-go/service/s3" "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" @@ -32,10 +31,11 @@ import ( tfs3 "github.com/hashicorp/terraform-provider-aws/internal/service/s3" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" ) func init() { - acctest.RegisterServiceErrorCheckFunc(s3.EndpointsID, testAccErrorCheckSkip) + acctest.RegisterServiceErrorCheckFunc(names.S3EndpointID, testAccErrorCheckSkip) } // testAccErrorCheckSkip skips tests that have error messages indicating unsupported features @@ -55,7 +55,7 @@ func TestAccS3Bucket_Basic_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, 
CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -95,7 +95,7 @@ func TestAccS3Bucket_Basic_emptyString(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -123,7 +123,7 @@ func TestAccS3Bucket_Basic_nameGenerated(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -151,7 +151,7 @@ func TestAccS3Bucket_Basic_namePrefix(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -180,7 +180,7 @@ func TestAccS3Bucket_Basic_forceDestroy(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -202,7 +202,7 @@ func TestAccS3Bucket_Basic_forceDestroyWithObjectVersions(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, 
names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -231,7 +231,7 @@ func TestAccS3Bucket_Basic_forceDestroyWithEmptyPrefixes(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -253,7 +253,7 @@ func TestAccS3Bucket_Basic_forceDestroyWithObjectLockEnabled(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -278,15 +278,15 @@ func TestAccS3Bucket_Basic_acceleration(t *testing.T) { acctest.PreCheck(ctx, t) acctest.PreCheckPartitionHasService(t, cloudfront.EndpointsID) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccBucketConfig_acceleration(bucketName, s3.BucketAccelerateStatusEnabled), + Config: testAccBucketConfig_acceleration(bucketName, string(types.BucketAccelerateStatusEnabled)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "acceleration_status", s3.BucketAccelerateStatusEnabled), + resource.TestCheckResourceAttr(resourceName, "acceleration_status", string(types.BucketAccelerateStatusEnabled)), ), }, { @@ -296,10 +296,10 @@ func TestAccS3Bucket_Basic_acceleration(t 
*testing.T) { ImportStateVerifyIgnore: []string{"force_destroy"}, }, { - Config: testAccBucketConfig_acceleration(bucketName, s3.BucketAccelerateStatusSuspended), + Config: testAccBucketConfig_acceleration(bucketName, string(types.BucketAccelerateStatusSuspended)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "acceleration_status", s3.BucketAccelerateStatusSuspended), + resource.TestCheckResourceAttr(resourceName, "acceleration_status", string(types.BucketAccelerateStatusSuspended)), ), }, }, @@ -313,7 +313,7 @@ func TestAccS3Bucket_Basic_keyEnabled(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -346,15 +346,15 @@ func TestAccS3Bucket_Basic_requestPayer(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccBucketConfig_requestPayer(bucketName, s3.PayerBucketOwner), + Config: testAccBucketConfig_requestPayer(bucketName, string(types.PayerBucketOwner)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "request_payer", s3.PayerBucketOwner), + resource.TestCheckResourceAttr(resourceName, "request_payer", string(types.PayerBucketOwner)), ), }, { @@ -364,10 +364,10 @@ func TestAccS3Bucket_Basic_requestPayer(t *testing.T) { ImportStateVerifyIgnore: []string{"force_destroy", "acl"}, }, { - Config: 
testAccBucketConfig_requestPayer(bucketName, s3.PayerRequester), + Config: testAccBucketConfig_requestPayer(bucketName, string(types.PayerRequester)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExists(ctx, resourceName), - resource.TestCheckResourceAttr(resourceName, "request_payer", s3.PayerRequester), + resource.TestCheckResourceAttr(resourceName, "request_payer", string(types.PayerRequester)), ), }, }, @@ -384,7 +384,7 @@ func TestAccS3Bucket_disappears(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -409,13 +409,13 @@ func TestAccS3Bucket_Duplicate_basic(t *testing.T) { acctest.PreCheck(ctx, t) acctest.PreCheckRegionNot(t, endpoints.UsEast1RegionID) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccBucketConfig_duplicate(region, bucketName), - ExpectError: regexache.MustCompile(s3.ErrCodeBucketAlreadyOwnedByYou), + ExpectError: regexache.MustCompile(tfs3.ErrCodeBucketAlreadyOwnedByYou), }, }, }) @@ -429,13 +429,13 @@ func TestAccS3Bucket_Duplicate_UsEast1(t *testing.T) { acctest.PreCheck(ctx, t) acctest.PreCheckPartition(t, endpoints.AwsPartitionID) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccBucketConfig_duplicate(endpoints.UsEast1RegionID, bucketName), - ExpectError: 
regexache.MustCompile(tfs3.ErrMessageBucketAlreadyExists), + ExpectError: regexache.MustCompile(tfs3.ErrCodeBucketAlreadyExists), }, }, }) @@ -450,13 +450,13 @@ func TestAccS3Bucket_Duplicate_UsEast1AltAccount(t *testing.T) { acctest.PreCheckPartition(t, endpoints.AwsPartitionID) acctest.PreCheckAlternateAccount(t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesAlternate(ctx, t), CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ { Config: testAccBucketConfig_duplicateAltAccount(endpoints.UsEast1RegionID, bucketName), - ExpectError: regexache.MustCompile(s3.ErrCodeBucketAlreadyExists), + ExpectError: regexache.MustCompile(tfs3.ErrCodeBucketAlreadyExists), }, }, }) @@ -469,7 +469,7 @@ func TestAccS3Bucket_Tags_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -493,7 +493,7 @@ func TestAccS3Bucket_Tags_withNoSystemTags(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -555,7 +555,7 @@ func TestAccS3Bucket_Tags_withSystemTags(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: 
resource.ComposeAggregateTestCheckFunc( testAccCheckBucketDestroy(ctx), @@ -639,7 +639,7 @@ func TestAccS3Bucket_Tags_ignoreTags(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -685,7 +685,7 @@ func TestAccS3Bucket_Manage_lifecycleBasic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -776,7 +776,7 @@ func TestAccS3Bucket_Manage_lifecycleExpireMarkerOnly(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -815,7 +815,7 @@ func TestAccS3Bucket_Manage_lifecycleRuleExpirationEmptyBlock(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -837,7 +837,7 @@ func TestAccS3Bucket_Manage_lifecycleRuleAbortIncompleteMultipartUploadDaysNoExp resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + 
ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -864,7 +864,7 @@ func TestAccS3Bucket_Manage_lifecycleRemove(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -895,7 +895,7 @@ func TestAccS3Bucket_Manage_objectLock(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -905,7 +905,7 @@ func TestAccS3Bucket_Manage_objectLock(t *testing.T) { testAccCheckBucketExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", "true"), resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.object_lock_enabled", s3.ObjectLockEnabledEnabled), + resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.object_lock_enabled", string(types.ObjectLockEnabledEnabled)), resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.rule.#", "0"), ), }, @@ -937,7 +937,7 @@ func TestAccS3Bucket_Manage_objectLock_deprecatedEnabled(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -947,7 +947,7 @@ func TestAccS3Bucket_Manage_objectLock_deprecatedEnabled(t *testing.T) { testAccCheckBucketExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", "true"), resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.object_lock_enabled", s3.ObjectLockEnabledEnabled), + resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.object_lock_enabled", string(types.ObjectLockEnabledEnabled)), resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.rule.#", "0"), ), }, @@ -968,7 +968,7 @@ func TestAccS3Bucket_Manage_objectLock_migrate(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -978,7 +978,7 @@ func TestAccS3Bucket_Manage_objectLock_migrate(t *testing.T) { testAccCheckBucketExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", "true"), resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.object_lock_enabled", s3.ObjectLockEnabledEnabled), + resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.object_lock_enabled", string(types.ObjectLockEnabledEnabled)), ), }, { @@ -996,7 +996,7 @@ func TestAccS3Bucket_Manage_objectLockWithVersioning(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: 
acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -1006,7 +1006,7 @@ func TestAccS3Bucket_Manage_objectLockWithVersioning(t *testing.T) { testAccCheckBucketExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", "true"), resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.object_lock_enabled", s3.ObjectLockEnabledEnabled), + resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.object_lock_enabled", string(types.ObjectLockEnabledEnabled)), ), }, { @@ -1026,7 +1026,7 @@ func TestAccS3Bucket_Manage_objectLockWithVersioning_deprecatedEnabled(t *testin resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -1036,7 +1036,7 @@ func TestAccS3Bucket_Manage_objectLockWithVersioning_deprecatedEnabled(t *testin testAccCheckBucketExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", "true"), resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.#", "1"), - resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.object_lock_enabled", s3.ObjectLockEnabledEnabled), + resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.0.object_lock_enabled", string(types.ObjectLockEnabledEnabled)), ), }, { @@ -1056,7 +1056,7 @@ func TestAccS3Bucket_Manage_versioning(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + 
ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -1101,7 +1101,7 @@ func TestAccS3Bucket_Manage_versioningDisabled(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -1131,7 +1131,7 @@ func TestAccS3Bucket_Manage_MFADeleteDisabled(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -1161,7 +1161,7 @@ func TestAccS3Bucket_Manage_versioningAndMFADeleteDisabled(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -1200,12 +1200,12 @@ func TestAccS3Bucket_Replication_basic(t *testing.T) { acctest.PreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ { - Config: testAccBucketConfig_replication(bucketName, 
s3.StorageClassStandard), + Config: testAccBucketConfig_replication(bucketName, string(types.StorageClassStandard)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExistsWithProvider(ctx, resourceName, acctest.RegionProviderFunc(region, &providers)), resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), @@ -1215,7 +1215,7 @@ func TestAccS3Bucket_Replication_basic(t *testing.T) { ), }, { - Config: testAccBucketConfig_replication(bucketName, s3.StorageClassGlacier), + Config: testAccBucketConfig_replication(bucketName, string(types.StorageClassGlacier)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExistsWithProvider(ctx, resourceName, acctest.RegionProviderFunc(region, &providers)), resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "1"), @@ -1252,7 +1252,7 @@ func TestAccS3Bucket_Replication_multipleDestinationsEmptyFilter(t *testing.T) { acctest.PreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -1320,7 +1320,7 @@ func TestAccS3Bucket_Replication_multipleDestinationsNonEmptyFilter(t *testing.T acctest.PreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -1393,7 +1393,7 @@ func TestAccS3Bucket_Replication_twoDestination(t *testing.T) { acctest.PreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: 
acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -1452,7 +1452,7 @@ func TestAccS3Bucket_Replication_ruleDestinationAccessControlTranslation(t *test acctest.PreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -1501,7 +1501,7 @@ func TestAccS3Bucket_Replication_ruleDestinationAddAccessControlTranslation(t *t acctest.PreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -1550,7 +1550,7 @@ func TestAccS3Bucket_Replication_withoutStorageClass(t *testing.T) { acctest.PreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -1584,7 +1584,7 @@ func TestAccS3Bucket_Replication_expectVersioningValidationError(t *testing.T) { acctest.PreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: 
acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -1612,7 +1612,7 @@ func TestAccS3Bucket_Replication_withoutPrefix(t *testing.T) { acctest.PreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -1650,7 +1650,7 @@ func TestAccS3Bucket_Replication_schemaV2(t *testing.T) { acctest.PreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -1723,7 +1723,7 @@ func TestAccS3Bucket_Replication_schemaV2SameRegion(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -1767,7 +1767,7 @@ func TestAccS3Bucket_Replication_RTC_valid(t *testing.T) { acctest.PreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: 
acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ @@ -1830,15 +1830,15 @@ func TestAccS3Bucket_Security_corsUpdate(t *testing.T) { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx) - _, err := conn.PutBucketCorsWithContext(ctx, &s3.PutBucketCorsInput{ + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + _, err := conn.PutBucketCors(ctx, &s3.PutBucketCorsInput{ Bucket: aws.String(rs.Primary.ID), - CORSConfiguration: &s3.CORSConfiguration{ - CORSRules: []*s3.CORSRule{ + CORSConfiguration: &types.CORSConfiguration{ + CORSRules: []types.CORSRule{ { - AllowedHeaders: []*string{aws.String("*")}, - AllowedMethods: []*string{aws.String("GET")}, - AllowedOrigins: []*string{aws.String("https://www.example.com")}, + AllowedHeaders: []string{"*"}, + AllowedMethods: []string{"GET"}, + AllowedOrigins: []string{"https://www.example.com"}, }, }, }, @@ -1852,7 +1852,7 @@ func TestAccS3Bucket_Security_corsUpdate(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -1916,8 +1916,8 @@ func TestAccS3Bucket_Security_corsDelete(t *testing.T) { return fmt.Errorf("Not found: %s", n) } - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx) - _, err := conn.DeleteBucketCorsWithContext(ctx, &s3.DeleteBucketCorsInput{ + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + _, err := conn.DeleteBucketCors(ctx, &s3.DeleteBucketCorsInput{ Bucket: aws.String(rs.Primary.ID), }) if err != nil && !tfawserr.ErrCodeEquals(err, tfs3.ErrCodeNoSuchCORSConfiguration) { @@ 
-1929,7 +1929,7 @@ func TestAccS3Bucket_Security_corsDelete(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -1952,7 +1952,7 @@ func TestAccS3Bucket_Security_corsEmptyOrigin(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -1991,7 +1991,7 @@ func TestAccS3Bucket_Security_corsSingleMethodAndEmptyOrigin(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -2018,7 +2018,7 @@ func TestAccS3Bucket_Security_logging(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -2048,7 +2048,7 @@ func TestAccS3Bucket_Security_enableDefaultEncryptionWhenTypical(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), 
ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -2059,7 +2059,7 @@ func TestAccS3Bucket_Security_enableDefaultEncryptionWhenTypical(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.#", "1"), resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.#", "1"), resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.#", "1"), - resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAwsKms), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", string(types.ServerSideEncryptionAwsKms)), resource.TestMatchResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.kms_master_key_id", regexache.MustCompile("^arn")), ), }, @@ -2080,18 +2080,18 @@ func TestAccS3Bucket_Security_enableDefaultEncryptionWhenAES256IsUsed(t *testing resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccBucketConfig_defaultEncryptionDefaultKey(bucketName, s3.ServerSideEncryptionAes256), + Config: testAccBucketConfig_defaultEncryptionDefaultKey(bucketName, string(types.ServerSideEncryptionAes256)), Check: resource.ComposeTestCheckFunc( testAccCheckBucketExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.#", "1"), 
resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.#", "1"), resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.#", "1"), - resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", s3.ServerSideEncryptionAes256), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", string(types.ServerSideEncryptionAes256)), resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.kms_master_key_id", ""), ), }, @@ -2112,12 +2112,12 @@ func TestAccS3Bucket_Security_disableDefaultEncryptionWhenDefaultEncryptionIsEna resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccBucketConfig_defaultEncryptionDefaultKey(bucketName, s3.ServerSideEncryptionAwsKms), + Config: testAccBucketConfig_defaultEncryptionDefaultKey(bucketName, string(types.ServerSideEncryptionAwsKms)), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckBucketExists(ctx, resourceName), ), @@ -2149,7 +2149,7 @@ func TestAccS3Bucket_Web_simple(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -2202,7 +2202,7 @@ func 
TestAccS3Bucket_Web_redirect(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -2253,7 +2253,7 @@ func TestAccS3Bucket_Web_routingRules(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ @@ -2615,10 +2615,6 @@ func testAccCheckBucketExistsWithProvider(ctx context.Context, n string, provide return fmt.Errorf("Not found: %s", n) } - if rs.Primary.ID == "" { - return fmt.Errorf("No S3 Bucket ID is set") - } - conn := providerF().Meta().(*conns.AWSClient).S3Client(ctx) return tfs3.FindBucket(ctx, conn, rs.Primary.ID) @@ -2635,9 +2631,9 @@ func testAccCheckBucketAddObjects(ctx context.Context, n string, keys ...string) } for _, key := range keys { - _, err := conn.PutObject(ctx, &s3_sdkv2.PutObjectInput{ - Bucket: aws_sdkv2.String(rs.Primary.ID), - Key: aws_sdkv2.String(key), + _, err := conn.PutObject(ctx, &s3.PutObjectInput{ + Bucket: aws.String(rs.Primary.ID), + Key: aws.String(key), }) if err != nil { @@ -2653,13 +2649,13 @@ func testAccCheckBucketAddObjectsWithLegalHold(ctx context.Context, n string, ke return func(s *terraform.State) error { rs := s.RootModule().Resources[n] - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) for _, key := range keys { - _, err := conn.PutObjectWithContext(ctx, &s3.PutObjectInput{ + _, err := conn.PutObject(ctx, &s3.PutObjectInput{ Bucket: 
aws.String(rs.Primary.ID), Key: aws.String(key), - ObjectLockLegalHoldStatus: aws.String(s3.ObjectLockLegalHoldStatusOn), + ObjectLockLegalHoldStatus: types.ObjectLockLegalHoldStatusOn, }) if err != nil { @@ -2674,12 +2670,12 @@ func testAccCheckBucketAddObjectsWithLegalHold(ctx context.Context, n string, ke func testAccCheckBucketAddObjectWithMetadata(ctx context.Context, n string, key string, metadata map[string]string) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) - _, err := conn.PutObjectWithContext(ctx, &s3.PutObjectInput{ + _, err := conn.PutObject(ctx, &s3.PutObjectInput{ Bucket: aws.String(rs.Primary.ID), Key: aws.String(key), - Metadata: aws.StringMap(metadata), + Metadata: metadata, }) if err != nil { @@ -2696,9 +2692,9 @@ func testAccCheckBucketDeleteObjects(ctx context.Context, n string, keys ...stri conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) for _, key := range keys { - _, err := conn.DeleteObject(ctx, &s3_sdkv2.DeleteObjectInput{ - Bucket: aws_sdkv2.String(rs.Primary.ID), - Key: aws_sdkv2.String(key), + _, err := conn.DeleteObject(ctx, &s3.DeleteObjectInput{ + Bucket: aws.String(rs.Primary.ID), + Key: aws.String(key), }) if err != nil { @@ -2711,7 +2707,7 @@ func testAccCheckBucketDeleteObjects(ctx context.Context, n string, keys ...stri } // Create an S3 bucket via a CF stack so that it has system tags. 
-func testAccCheckBucketCreateViaCloudFormation(ctx context.Context, n string, stackID *string) resource.TestCheckFunc { +func testAccCheckBucketCreateViaCloudFormation(ctx context.Context, n string, v *string) resource.TestCheckFunc { return func(s *terraform.State) error { conn := acctest.Provider.Meta().(*conns.AWSClient).CloudFormationConn(ctx) stackName := sdkacctest.RandomWithPrefix("tf-acc-test-s3tags") @@ -2727,28 +2723,31 @@ func testAccCheckBucketCreateViaCloudFormation(ctx context.Context, n string, st }`, n) requestToken := id.UniqueId() - req := &cloudformation.CreateStackInput{ + input := &cloudformation.CreateStackInput{ + ClientRequestToken: aws.String(requestToken), StackName: aws.String(stackName), TemplateBody: aws.String(templateBody), - ClientRequestToken: aws.String(requestToken), } - log.Printf("[DEBUG] Creating CloudFormation stack: %s", req) - resp, err := conn.CreateStackWithContext(ctx, req) + output, err := conn.CreateStackWithContext(ctx, input) + if err != nil { - return fmt.Errorf("error creating CloudFormation stack: %w", err) + return fmt.Errorf("creating CloudFormation Stack: %w", err) } - stack, err := tfcloudformation.WaitStackCreated(ctx, conn, aws.StringValue(resp.StackId), requestToken, 10*time.Minute) + stackID := aws.ToString(output.StackId) + stack, err := tfcloudformation.WaitStackCreated(ctx, conn, stackID, requestToken, 10*time.Minute) + if err != nil { - return fmt.Errorf("Error waiting for CloudFormation stack creation: %w", err) + return fmt.Errorf("waiting for CloudFormation Stack (%s) create: %w", stackID, err) } - status := aws.StringValue(stack.StackStatus) - if status != cloudformation.StackStatusCreateComplete { - return fmt.Errorf("Invalid CloudFormation stack creation status: %s", status) + + if status := aws.ToString(stack.StackStatus); status != cloudformation.StackStatusCreateComplete { + return fmt.Errorf("invalid CloudFormation Stack (%s) status: %s", stackID, status) } - *stackID = 
aws.StringValue(resp.StackId) + *v = stackID + return nil } } @@ -2756,9 +2755,11 @@ func testAccCheckBucketCreateViaCloudFormation(ctx context.Context, n string, st func testAccCheckBucketTagKeys(ctx context.Context, n string, keys ...string) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) + + bucket := rs.Primary.Attributes["bucket"] + got, err := tfs3.BucketListTags(ctx, conn, bucket) - got, err := tfs3.BucketListTags(ctx, conn, rs.Primary.Attributes["bucket"]) if err != nil { return err } @@ -2772,7 +2773,7 @@ func testAccCheckBucketTagKeys(ctx context.Context, n string, keys ...string) re } } if !ok { - return fmt.Errorf("Key %s not found in bucket's tag set", want) + return fmt.Errorf("key %s not found in S3 Bucket (%s) tag set", bucket, want) } } @@ -2791,7 +2792,7 @@ func testAccCheckBucketDomainName(resourceName string, attributeName string, buc func testAccBucketRegionalDomainName(bucket, region string) string { regionalEndpoint, err := tfs3.BucketRegionalDomainName(bucket, region) if err != nil { - return fmt.Sprintf("Regional endpoint not found for bucket %s", bucket) + return fmt.Sprintf("regional endpoint not found for S3 Bucket (%s)", bucket) } return regionalEndpoint } @@ -2808,7 +2809,7 @@ func testAccCheckBucketWebsiteEndpoint(resourceName string, attributeName string func testAccCheckBucketUpdateTags(ctx context.Context, n string, oldTags, newTags map[string]string) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) return tfs3.BucketUpdateTags(ctx, conn, rs.Primary.Attributes["bucket"], oldTags, newTags) } @@ -2817,7 +2818,7 @@ func testAccCheckBucketUpdateTags(ctx context.Context, n 
string, oldTags, newTag func testAccCheckBucketCheckTags(ctx context.Context, n string, expectedTags map[string]string) resource.TestCheckFunc { return func(s *terraform.State) error { rs := s.RootModule().Resources[n] - conn := acctest.Provider.Meta().(*conns.AWSClient).S3Conn(ctx) + conn := acctest.Provider.Meta().(*conns.AWSClient).S3Client(ctx) got, err := tfs3.BucketListTags(ctx, conn, rs.Primary.Attributes["bucket"]) if err != nil { diff --git a/internal/service/s3/errors.go b/internal/service/s3/errors.go index a65049ca479..369ce7755af 100644 --- a/internal/service/s3/errors.go +++ b/internal/service/s3/errors.go @@ -12,6 +12,8 @@ import ( const ( errCodeAccessDenied = "AccessDenied" + errCodeBucketAlreadyExists = "BucketAlreadyExists" + errCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou" errCodeBucketNotEmpty = "BucketNotEmpty" errCodeInvalidArgument = "InvalidArgument" errCodeInvalidBucketState = "InvalidBucketState" @@ -44,10 +46,6 @@ const ( errCodeXNotImplemented = "XNotImplemented" ) -const ( - ErrMessageBucketAlreadyExists = "bucket already exists" -) - func errDirectoryBucket(err error) error { return fmt.Errorf("directory buckets are not supported: %w", err) } diff --git a/internal/service/s3/exports_test.go b/internal/service/s3/exports_test.go index 952663c63fb..42495f702a1 100644 --- a/internal/service/s3/exports_test.go +++ b/internal/service/s3/exports_test.go @@ -33,6 +33,8 @@ var ( IsDirectoryBucket = isDirectoryBucket SDKv1CompatibleCleanKey = sdkv1CompatibleCleanKey + ErrCodeBucketAlreadyExists = errCodeBucketAlreadyExists + ErrCodeBucketAlreadyOwnedByYou = errCodeBucketAlreadyOwnedByYou ErrCodeNoSuchCORSConfiguration = errCodeNoSuchCORSConfiguration LifecycleRuleStatusDisabled = lifecycleRuleStatusDisabled LifecycleRuleStatusEnabled = lifecycleRuleStatusEnabled From 536be433b0a363d1e7cfc802711cb42d5d0dcbb7 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 19 Dec 2023 09:29:33 -0500 Subject: [PATCH 322/438] Additional AWS Regions 
in 'names.go'. --- names/names.go | 39 +++++++++++++++++++++++++++++++++++---- 1 file changed, 35 insertions(+), 4 deletions(-) diff --git a/names/names.go b/names/names.go index 30dd6320c9d..2195941e82b 100644 --- a/names/names.go +++ b/names/names.go @@ -105,12 +105,43 @@ const ( ) const ( + // AWS Standard partition's regions. GlobalRegionID = "aws-global" // AWS Standard global region. - USEast1RegionID = "us-east-1" // US East (N. Virginia). - USWest1RegionID = "us-west-1" // US West (N. California). - USWest2RegionID = "us-west-2" // US West (Oregon). - + AFSouth1RegionID = "af-south-1" // Africa (Cape Town). + APEast1RegionID = "ap-east-1" // Asia Pacific (Hong Kong). + APNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). + APNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). + APNortheast3RegionID = "ap-northeast-3" // Asia Pacific (Osaka). + APSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). + APSouth2RegionID = "ap-south-2" // Asia Pacific (Hyderabad). + APSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). + APSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). + APSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta). + APSoutheast4RegionID = "ap-southeast-4" // Asia Pacific (Melbourne). + CACentral1RegionID = "ca-central-1" // Canada (Central). + EUCentral1RegionID = "eu-central-1" // Europe (Frankfurt). + EUCentral2RegionID = "eu-central-2" // Europe (Zurich). + EUNorth1RegionID = "eu-north-1" // Europe (Stockholm). + EUSouth1RegionID = "eu-south-1" // Europe (Milan). + EUSouth2RegionID = "eu-south-2" // Europe (Spain). + EUWest1RegionID = "eu-west-1" // Europe (Ireland). + EUWest2RegionID = "eu-west-2" // Europe (London). + EUWest3RegionID = "eu-west-3" // Europe (Paris). + ILCentral1RegionID = "il-central-1" // Israel (Tel Aviv). + MECentral1RegionID = "me-central-1" // Middle East (UAE). + MESouth1RegionID = "me-south-1" // Middle East (Bahrain). 
+ SAEast1RegionID = "sa-east-1" // South America (Sao Paulo). + USEast1RegionID = "us-east-1" // US East (N. Virginia). + USEast2RegionID = "us-east-2" // US East (Ohio). + USWest1RegionID = "us-west-1" // US West (N. California). + USWest2RegionID = "us-west-2" // US West (Oregon). + + // AWS China partition's regions. + CNNorth1RegionID = "cn-north-1" // China (Beijing). + CNNorthwest1RegionID = "cn-northwest-1" // China (Ningxia). + + // AWS GovCloud (US) partition's regions. USGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). USGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West). ) From b17edd7ea52da20cb675408c82b60445ab7ce9da Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Dec 2023 09:57:44 -0500 Subject: [PATCH 323/438] build(deps): bump github.com/aws/aws-sdk-go in /.ci/providerlint (#34987) Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.49.4 to 1.49.5. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.49.4...v1.49.5) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .ci/providerlint/go.mod | 2 +- .ci/providerlint/go.sum | 4 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 76 +++++++++++++++++++ .ci/providerlint/vendor/modules.txt | 2 +- 4 files changed, 80 insertions(+), 4 deletions(-) diff --git a/.ci/providerlint/go.mod b/.ci/providerlint/go.mod index ab9b94f28d8..2ea8f34084b 100644 --- a/.ci/providerlint/go.mod +++ b/.ci/providerlint/go.mod @@ -3,7 +3,7 @@ module github.com/hashicorp/terraform-provider-aws/ci/providerlint go 1.20 require ( - github.com/aws/aws-sdk-go v1.49.4 + github.com/aws/aws-sdk-go v1.49.5 github.com/bflad/tfproviderlint v0.29.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0 golang.org/x/tools v0.13.0 diff --git a/.ci/providerlint/go.sum b/.ci/providerlint/go.sum index 1b1c2f84fcc..477c0a99c76 100644 --- a/.ci/providerlint/go.sum +++ b/.ci/providerlint/go.sum @@ -7,8 +7,8 @@ github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= -github.com/aws/aws-sdk-go v1.49.4 h1:qiXsqEeLLhdLgUIyfr5ot+N/dGPWALmtM1SetRmbUlY= -github.com/aws/aws-sdk-go v1.49.4/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.49.5 h1:y2yfBlwjPDi3/sBVKeznYEdDy6wIhjA2L5NCBMLUIYA= +github.com/aws/aws-sdk-go v1.49.5/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/bflad/gopaniccheck v0.1.0 h1:tJftp+bv42ouERmUMWLoUn/5bi/iQZjHPznM00cP/bU= github.com/bflad/gopaniccheck v0.1.0/go.mod h1:ZCj2vSr7EqVeDaqVsWN4n2MwdROx1YL+LFo47TSWtsA= github.com/bflad/tfproviderlint v0.29.0 h1:zxKYAAM6IZ4ace1a3LX+uzMRIMP8L+iOtEc+FP2Yoow= diff --git 
a/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index e519246f43e..16de4d78eec 100644 --- a/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -5442,6 +5442,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, endpointKey{ Region: "ap-southeast-1", }: endpoint{}, @@ -9028,6 +9031,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "il-central-1", + }: endpoint{}, endpointKey{ Region: "me-central-1", }: endpoint{}, @@ -14866,6 +14872,9 @@ var awsPartition = partition{ endpointKey{ Region: "il-central-1", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "me-south-1", }: endpoint{}, @@ -43377,15 +43386,61 @@ var awsisoPartition = partition{ }, }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-iso-east-1", + }: endpoint{ + Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-east-1", + }, + Deprecated: boxedTrue, + }, + endpointKey{ + Region: "fips-us-iso-west-1", + }: endpoint{ + Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", + CredentialScope: credentialScope{ + Region: "us-iso-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-iso-east-1", }: endpoint{ Protocols: []string{"http", "https"}, SignatureVersions: []string{"s3v4"}, }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-iso-east-1.c2s.ic.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + endpointKey{ + Region: "us-iso-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: 
"s3-fips.dualstack.us-iso-east-1.c2s.ic.gov", + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, endpointKey{ Region: "us-iso-west-1", }: endpoint{}, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-iso-west-1.c2s.ic.gov", + }, + endpointKey{ + Region: "us-iso-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-iso-west-1.c2s.ic.gov", + }, }, }, "secretsmanager": service{ @@ -44173,9 +44228,30 @@ var awsisobPartition = partition{ }, }, Endpoints: serviceEndpoints{ + endpointKey{ + Region: "fips-us-isob-east-1", + }: endpoint{ + Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", + CredentialScope: credentialScope{ + Region: "us-isob-east-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "us-isob-east-1", }: endpoint{}, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.us-isob-east-1.sc2s.sgov.gov", + }, + endpointKey{ + Region: "us-isob-east-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.us-isob-east-1.sc2s.sgov.gov", + }, }, }, "secretsmanager": service{ diff --git a/.ci/providerlint/vendor/modules.txt b/.ci/providerlint/vendor/modules.txt index b632043e690..290e639c817 100644 --- a/.ci/providerlint/vendor/modules.txt +++ b/.ci/providerlint/vendor/modules.txt @@ -24,7 +24,7 @@ github.com/agext/levenshtein # github.com/apparentlymart/go-textseg/v15 v15.0.0 ## explicit; go 1.16 github.com/apparentlymart/go-textseg/v15/textseg -# github.com/aws/aws-sdk-go v1.49.4 +# github.com/aws/aws-sdk-go v1.49.5 ## explicit; go 1.19 github.com/aws/aws-sdk-go/aws/awserr github.com/aws/aws-sdk-go/aws/endpoints From 899cd8ee5c8640ac676efb27121af24105471d71 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Dec 2023 09:58:38 -0500 Subject: [PATCH 324/438] build(deps): bump the 
aws-sdk-go group with 5 updates (#34986) Bumps the aws-sdk-go group with 5 updates: | Package | From | To | | --- | --- | --- | | [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) | `1.49.4` | `1.49.5` | | [github.com/aws/aws-sdk-go-v2/feature/s3/manager](https://github.com/aws/aws-sdk-go-v2) | `1.15.7` | `1.15.8` | | [github.com/aws/aws-sdk-go-v2/service/eks](https://github.com/aws/aws-sdk-go-v2) | `1.35.5` | `1.36.0` | | [github.com/aws/aws-sdk-go-v2/service/identitystore](https://github.com/aws/aws-sdk-go-v2) | `1.21.5` | `1.21.6` | | [github.com/aws/aws-sdk-go-v2/service/s3control](https://github.com/aws/aws-sdk-go-v2) | `1.41.5` | `1.41.6` | Updates `github.com/aws/aws-sdk-go` from 1.49.4 to 1.49.5 - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.49.4...v1.49.5) Updates `github.com/aws/aws-sdk-go-v2/feature/s3/manager` from 1.15.7 to 1.15.8 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.15.7...config/v1.15.8) Updates `github.com/aws/aws-sdk-go-v2/service/eks` from 1.35.5 to 1.36.0 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/service/s3/v1.36.0/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/ecs/v1.35.5...service/s3/v1.36.0) Updates `github.com/aws/aws-sdk-go-v2/service/identitystore` from 1.21.5 to 1.21.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/pi/v1.21.5...service/efs/v1.21.6) Updates `github.com/aws/aws-sdk-go-v2/service/s3control` from 1.41.5 to 1.41.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3control/v1.41.5...service/s3control/v1.41.6) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go 
dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/feature/s3/manager dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/eks dependency-type: direct:production update-type: version-update:semver-minor dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/identitystore dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/s3control dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 12 ++++++------ go.sum | 24 ++++++++++++------------ 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/go.mod b/go.mod index 9f200c85948..956b6a50628 100644 --- a/go.mod +++ b/go.mod @@ -5,11 +5,11 @@ go 1.20 require ( github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c github.com/YakDriver/regexache v0.23.0 - github.com/aws/aws-sdk-go v1.49.4 + github.com/aws/aws-sdk-go v1.49.5 github.com/aws/aws-sdk-go-v2 v1.24.0 github.com/aws/aws-sdk-go-v2/config v1.26.1 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.7 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.8 github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.26.5 github.com/aws/aws-sdk-go-v2/service/account v1.14.5 github.com/aws/aws-sdk-go-v2/service/acm v1.22.5 @@ -39,7 +39,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.6.5 github.com/aws/aws-sdk-go-v2/service/ec2 v1.141.0 github.com/aws/aws-sdk-go-v2/service/ecr v1.24.5 - github.com/aws/aws-sdk-go-v2/service/eks v1.35.5 + 
github.com/aws/aws-sdk-go-v2/service/eks v1.36.0 github.com/aws/aws-sdk-go-v2/service/emr v1.35.5 github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.6 github.com/aws/aws-sdk-go-v2/service/evidently v1.16.5 @@ -47,7 +47,7 @@ require ( github.com/aws/aws-sdk-go-v2/service/fis v1.21.5 github.com/aws/aws-sdk-go-v2/service/glacier v1.19.5 github.com/aws/aws-sdk-go-v2/service/healthlake v1.20.5 - github.com/aws/aws-sdk-go-v2/service/identitystore v1.21.5 + github.com/aws/aws-sdk-go-v2/service/identitystore v1.21.6 github.com/aws/aws-sdk-go-v2/service/inspector2 v1.20.5 github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.10.5 github.com/aws/aws-sdk-go-v2/service/ivschat v1.10.5 @@ -77,8 +77,8 @@ require ( github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.19.5 github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.6.6 github.com/aws/aws-sdk-go-v2/service/route53domains v1.20.5 - github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5 - github.com/aws/aws-sdk-go-v2/service/s3control v1.41.5 + github.com/aws/aws-sdk-go-v2/service/s3 v1.47.6 + github.com/aws/aws-sdk-go-v2/service/s3control v1.41.6 github.com/aws/aws-sdk-go-v2/service/scheduler v1.6.5 github.com/aws/aws-sdk-go-v2/service/securityhub v1.44.0 github.com/aws/aws-sdk-go-v2/service/securitylake v1.10.5 diff --git a/go.sum b/go.sum index 52445e50244..910c44c34e3 100644 --- a/go.sum +++ b/go.sum @@ -21,8 +21,8 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmms github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go v1.49.4 h1:qiXsqEeLLhdLgUIyfr5ot+N/dGPWALmtM1SetRmbUlY= -github.com/aws/aws-sdk-go v1.49.4/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.49.5 
h1:y2yfBlwjPDi3/sBVKeznYEdDy6wIhjA2L5NCBMLUIYA= +github.com/aws/aws-sdk-go v1.49.5/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v1.24.0 h1:890+mqQ+hTpNuw0gGP6/4akolQkSToDJgHfQE7AwGuk= github.com/aws/aws-sdk-go-v2 v1.24.0/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs= @@ -33,8 +33,8 @@ github.com/aws/aws-sdk-go-v2/credentials v1.16.12 h1:v/WgB8NxprNvr5inKIiVVrXPuuT github.com/aws/aws-sdk-go-v2/credentials v1.16.12/go.mod h1:X21k0FjEJe+/pauud82HYiQbEr9jRKY3kXEIQ4hXeTQ= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 h1:w98BT5w+ao1/r5sUuiH6JkVzjowOKeOJRHERyy1vh58= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10/go.mod h1:K2WGI7vUvkIv1HoNbfBA1bvIZ+9kL3YVmWxeKuLQsiw= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.7 h1:FnLf60PtjXp8ZOzQfhJVsqF0OtYKQZWQfqOLshh8YXg= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.7/go.mod h1:tDVvl8hyU6E9B8TrnNrZQEVkQlB8hjJwcgpPhgtlnNg= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.8 h1:7wCngExMTAW2Bjf0Y92uWap6ZUcenLLWI5T3VJiQneU= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.8/go.mod h1:XVrAWYYM4ZRwOCOuLoUiao5hbLqNutEdqwCR3ZvkXgc= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9 h1:v+HbZaCGmOwnTTVS86Fleq0vPzOd7tnJGbFhP0stNLs= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9/go.mod h1:Xjqy+Nyj7VDLBtCMkQYOw1QYfAEZCVLrfI0ezve8wd4= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9 h1:N94sVhRACtXyVcjXxrwK1SKFIJrA9pOJ5yu2eSHnmls= @@ -103,8 +103,8 @@ github.com/aws/aws-sdk-go-v2/service/ec2 v1.141.0 h1:cP43vFYAQyREOp972C+6d4+dzpx github.com/aws/aws-sdk-go-v2/service/ec2 v1.141.0/go.mod h1:qjhtI9zjpUHRc6khtrIM9fb48+ii6+UikL3/b+MKYn0= github.com/aws/aws-sdk-go-v2/service/ecr v1.24.5 h1:wLPDAUFT50NEXGXpywRU3AA74pg35RJjWol/68ruvQQ= github.com/aws/aws-sdk-go-v2/service/ecr v1.24.5/go.mod 
h1:AOHmGMoPtSY9Zm2zBuwUJQBisIvYAZeA1n7b6f4e880= -github.com/aws/aws-sdk-go-v2/service/eks v1.35.5 h1:LEYyWSnfdSSysPr5JWUkNwOD0MvXKfE/BX6Frg/lr1A= -github.com/aws/aws-sdk-go-v2/service/eks v1.35.5/go.mod h1:L1uv3UgQlAkdM9v0gpec7nnfUiQkCnGMjBE7MJArfWQ= +github.com/aws/aws-sdk-go-v2/service/eks v1.36.0 h1:5jk86RO+sFu2BjMz2GcQ9Yf2IEi2Ntec2wPOt/lDc5c= +github.com/aws/aws-sdk-go-v2/service/eks v1.36.0/go.mod h1:L1uv3UgQlAkdM9v0gpec7nnfUiQkCnGMjBE7MJArfWQ= github.com/aws/aws-sdk-go-v2/service/emr v1.35.5 h1:dZtEDpqYVg3i5oT8lSXxEsg6dInewHA3qNuyzHTvWck= github.com/aws/aws-sdk-go-v2/service/emr v1.35.5/go.mod h1:Drh6y2qLaw/wnDKTIcdqM2m358MIRXsZ2Bj2tjhVLq0= github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.6 h1:O2ppygCppB40GS7lDJUX4dGEgEdsKkX62oIAGgre/rY= @@ -121,8 +121,8 @@ github.com/aws/aws-sdk-go-v2/service/healthlake v1.20.5 h1:lm7KEWrkI54kso0o3qwOD github.com/aws/aws-sdk-go-v2/service/healthlake v1.20.5/go.mod h1:5IxzIDau0tsh8NRR6wcRp8u1Xn9QY9CcD9e34lpFqEQ= github.com/aws/aws-sdk-go-v2/service/iam v1.28.5 h1:Ts2eDDuMLrrmd0ARlg5zSoBQUvhdthgiNnPdiykTJs0= github.com/aws/aws-sdk-go-v2/service/iam v1.28.5/go.mod h1:kKI0gdVsf+Ev9knh/3lBJbchtX5LLNH25lAzx3KDj3Q= -github.com/aws/aws-sdk-go-v2/service/identitystore v1.21.5 h1:x93yL/0ey4Y/HEBSsqcLNQDDeIVRLOdziLMg3+YM/F8= -github.com/aws/aws-sdk-go-v2/service/identitystore v1.21.5/go.mod h1:vs4IYQdGHOLq6DsPfSuoADmRzr/AeWIk8m50XBnwN/o= +github.com/aws/aws-sdk-go-v2/service/identitystore v1.21.6 h1:myI4L7UVKRDV1m97FRh0UUbTvsexqRanej7iXLLeLyc= +github.com/aws/aws-sdk-go-v2/service/identitystore v1.21.6/go.mod h1:vs4IYQdGHOLq6DsPfSuoADmRzr/AeWIk8m50XBnwN/o= github.com/aws/aws-sdk-go-v2/service/inspector2 v1.20.5 h1:PKwE3fh67K7Kig3LlbuipQOrNSraQuEpFl09VOpaNvc= github.com/aws/aws-sdk-go-v2/service/inspector2 v1.20.5/go.mod h1:hIgLcOPNanV8IteYZUx1YyLUJf//t0dI1F2+ecjVvlo= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= @@ -191,10 +191,10 @@ 
github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.6.6 h1:K//BccrDBRMSQCa4UkV github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.6.6/go.mod h1:e2+mEoq1rHtFpX8p6WcgiFgnDz0zG6y1BY/g8us9g2I= github.com/aws/aws-sdk-go-v2/service/route53domains v1.20.5 h1:WDr8iQXuDzL6ERqRvpdIy1ZdOjg6lXlEHSo8wOJiOyI= github.com/aws/aws-sdk-go-v2/service/route53domains v1.20.5/go.mod h1:7fnaaVoKfZaWJ8RuNYTYV3SkqD6BkFYlRuFDEkHajpc= -github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5 h1:Keso8lIOS+IzI2MkPZyK6G0LYcK3My2LQ+T5bxghEAY= -github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5/go.mod h1:vADO6Jn+Rq4nDtfwNjhgR84qkZwiC6FqCaXdw/kYwjA= -github.com/aws/aws-sdk-go-v2/service/s3control v1.41.5 h1:Rv7K8i7cvpy0XWt06r4vDKyMswLld6mnOyfs8b38534= -github.com/aws/aws-sdk-go-v2/service/s3control v1.41.5/go.mod h1:sjVex3IIN70lry8Diga0vdi1DoHFwyFXY68ols4I8VI= +github.com/aws/aws-sdk-go-v2/service/s3 v1.47.6 h1:bkmlzokzTJyrFNA0J+EPlsF8x4/wp+9D45HTHO/ZUiY= +github.com/aws/aws-sdk-go-v2/service/s3 v1.47.6/go.mod h1:vADO6Jn+Rq4nDtfwNjhgR84qkZwiC6FqCaXdw/kYwjA= +github.com/aws/aws-sdk-go-v2/service/s3control v1.41.6 h1:pUtQfdf+KaKjsXFFlvVMVJpyVttwE5/tDTKgVX4oGcA= +github.com/aws/aws-sdk-go-v2/service/s3control v1.41.6/go.mod h1:sjVex3IIN70lry8Diga0vdi1DoHFwyFXY68ols4I8VI= github.com/aws/aws-sdk-go-v2/service/scheduler v1.6.5 h1:RpON5qyMUJKOGdQt0K7RUmV0zTUVSSGWtjvh/0CAqd8= github.com/aws/aws-sdk-go-v2/service/scheduler v1.6.5/go.mod h1:CXWnhzgqEhXAYwTVg4vBZQcP+yb4KxXOkogYih2tFm8= github.com/aws/aws-sdk-go-v2/service/securityhub v1.44.0 h1:ft7wTBdLlWGoZpF22CHmDywWj//MTUjyJoevEXBRHZg= From 2d6b8aa3cdb17bc443ff9c6a1761e95665ba506b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Dec 2023 10:00:59 -0500 Subject: [PATCH 325/438] build(deps): bump golang.org/x/crypto from 0.16.0 to 0.17.0 (#34980) Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.16.0 to 0.17.0. 
- [Commits](https://github.com/golang/crypto/compare/v0.16.0...v0.17.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-type: direct:production ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 956b6a50628..8c2d7a73991 100644 --- a/go.mod +++ b/go.mod @@ -131,7 +131,7 @@ require ( github.com/mitchellh/mapstructure v1.5.0 github.com/pquerna/otp v1.4.0 github.com/shopspring/decimal v1.3.1 - golang.org/x/crypto v0.16.0 + golang.org/x/crypto v0.17.0 golang.org/x/exp v0.0.0-20231006140011-7918f672742d golang.org/x/tools v0.14.0 gopkg.in/dnaeon/go-vcr.v3 v3.1.2 diff --git a/go.sum b/go.sum index 910c44c34e3..0761b35d658 100644 --- a/go.sum +++ b/go.sum @@ -476,8 +476,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= From a56dabc3c706254be66880d728d6e054b2648e81 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Dec 2023 10:01:12 -0500 Subject: [PATCH 326/438] build(deps): bump golang.org/x/crypto in /.ci/providerlint (#34981) Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.16.0 to 0.17.0. - [Commits](https://github.com/golang/crypto/compare/v0.16.0...v0.17.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .ci/providerlint/go.mod | 2 +- .ci/providerlint/go.sum | 4 ++-- .../golang.org/x/crypto/argon2/blamka_amd64.s | 12 +++++----- .../x/crypto/blake2b/blake2bAVX2_amd64.go | 2 +- .../x/crypto/blake2b/blake2bAVX2_amd64.s | 2 +- .../x/crypto/blake2b/blake2b_amd64.go | 24 ------------------- .../golang.org/x/crypto/blake2b/register.go | 2 -- .../golang.org/x/crypto/sha3/keccakf_amd64.s | 4 ++-- .ci/providerlint/vendor/modules.txt | 2 +- 9 files changed, 14 insertions(+), 40 deletions(-) delete mode 100644 .ci/providerlint/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go diff --git a/.ci/providerlint/go.mod b/.ci/providerlint/go.mod index 2ea8f34084b..8eccb4b7f72 100644 --- a/.ci/providerlint/go.mod +++ b/.ci/providerlint/go.mod @@ -49,7 +49,7 @@ require ( github.com/vmihailenco/msgpack/v5 v5.4.1 // indirect github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect github.com/zclconf/go-cty v1.14.1 // indirect - golang.org/x/crypto v0.16.0 // indirect + golang.org/x/crypto v0.17.0 // indirect golang.org/x/mod v0.14.0 // indirect golang.org/x/net v0.18.0 // indirect golang.org/x/sys v0.15.0 // indirect diff --git a/.ci/providerlint/go.sum b/.ci/providerlint/go.sum index 477c0a99c76..d259a928f78 100644 --- a/.ci/providerlint/go.sum +++ b/.ci/providerlint/go.sum @@ -132,8 +132,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.16.0 h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= diff --git a/.ci/providerlint/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/.ci/providerlint/vendor/golang.org/x/crypto/argon2/blamka_amd64.s index f3b653a12f3..6713accac09 100644 --- a/.ci/providerlint/vendor/golang.org/x/crypto/argon2/blamka_amd64.s +++ b/.ci/providerlint/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -199,8 +199,8 @@ TEXT ·mixBlocksSSE2(SB), 4, $0-32 MOVQ out+0(FP), DX MOVQ a+8(FP), AX MOVQ b+16(FP), BX - MOVQ a+24(FP), CX - MOVQ $128, BP + MOVQ c+24(FP), CX + MOVQ $128, DI loop: MOVOU 0(AX), X0 @@ -213,7 +213,7 @@ loop: ADDQ $16, BX ADDQ $16, CX ADDQ $16, DX - SUBQ $2, BP + SUBQ $2, DI JA loop RET @@ -222,8 +222,8 @@ TEXT ·xorBlocksSSE2(SB), 4, $0-32 MOVQ out+0(FP), DX MOVQ a+8(FP), AX MOVQ b+16(FP), BX - MOVQ a+24(FP), CX - MOVQ $128, BP + MOVQ c+24(FP), CX + MOVQ $128, DI loop: MOVOU 0(AX), X0 @@ -238,6 +238,6 @@ loop: ADDQ $16, BX ADDQ $16, CX ADDQ $16, DX - SUBQ $2, BP + SUBQ $2, DI JA loop RET diff --git a/.ci/providerlint/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/.ci/providerlint/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go index 4f506f87912..199c21d27aa 
100644 --- a/.ci/providerlint/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go +++ b/.ci/providerlint/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.7 && amd64 && gc && !purego +//go:build amd64 && gc && !purego package blake2b diff --git a/.ci/providerlint/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/.ci/providerlint/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s index 353bb7cac5f..9ae8206c201 100644 --- a/.ci/providerlint/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s +++ b/.ci/providerlint/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.7 && amd64 && gc && !purego +//go:build amd64 && gc && !purego #include "textflag.h" diff --git a/.ci/providerlint/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/.ci/providerlint/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go deleted file mode 100644 index 1d0770abba4..00000000000 --- a/.ci/providerlint/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.7 && amd64 && gc && !purego - -package blake2b - -import "golang.org/x/sys/cpu" - -func init() { - useSSE4 = cpu.X86.HasSSE41 -} - -//go:noescape -func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) - -func hashBlocks(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) { - if useSSE4 { - hashBlocksSSE4(h, c, flag, blocks) - } else { - hashBlocksGeneric(h, c, flag, blocks) - } -} diff --git a/.ci/providerlint/vendor/golang.org/x/crypto/blake2b/register.go b/.ci/providerlint/vendor/golang.org/x/crypto/blake2b/register.go index d9fcac3a4de..54e446e1d2c 100644 --- a/.ci/providerlint/vendor/golang.org/x/crypto/blake2b/register.go +++ b/.ci/providerlint/vendor/golang.org/x/crypto/blake2b/register.go @@ -2,8 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.9 - package blake2b import ( diff --git a/.ci/providerlint/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s b/.ci/providerlint/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s index 8fb26aebb2e..1f539388619 100644 --- a/.ci/providerlint/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s +++ b/.ci/providerlint/vendor/golang.org/x/crypto/sha3/keccakf_amd64.s @@ -319,9 +319,9 @@ MOVQ rDi, _si(oState); \ MOVQ rDo, _so(oState) \ -// func keccakF1600(state *[25]uint64) +// func keccakF1600(a *[25]uint64) TEXT ·keccakF1600(SB), 0, $200-8 - MOVQ state+0(FP), rpState + MOVQ a+0(FP), rpState // Convert the user state into an internal state NOTQ _be(rpState) diff --git a/.ci/providerlint/vendor/modules.txt b/.ci/providerlint/vendor/modules.txt index 290e639c817..0af3b9322a2 100644 --- a/.ci/providerlint/vendor/modules.txt +++ b/.ci/providerlint/vendor/modules.txt @@ -386,7 +386,7 @@ github.com/zclconf/go-cty/cty/function/stdlib github.com/zclconf/go-cty/cty/gocty github.com/zclconf/go-cty/cty/json github.com/zclconf/go-cty/cty/set -# golang.org/x/crypto v0.16.0 +# golang.org/x/crypto v0.17.0 ## explicit; go 1.18 
golang.org/x/crypto/argon2 golang.org/x/crypto/blake2b From 7123be68a0285dedfaed1111f060c4dd9ddc8e01 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Tue, 19 Dec 2023 15:03:39 +0000 Subject: [PATCH 327/438] Update CHANGELOG.md for #34981 --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 53e46b675d2..a95d67031c6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,14 +1,20 @@ ## 5.32.0 (Unreleased) +FEATURES: + +* **New Resource:** `aws_ssoadmin_application_access_scope` ([#34811](https://github.com/hashicorp/terraform-provider-aws/issues/34811)) + ENHANCEMENTS: * data-source/aws_ecr_image: Add `image_uri` attribute ([#24526](https://github.com/hashicorp/terraform-provider-aws/issues/24526)) +* resource/aws_s3_bucket: Modify server-side encryption configuration error handling, enabling support for NetApp StorageGRID ([#34890](https://github.com/hashicorp/terraform-provider-aws/issues/34890)) BUG FIXES: * data-source/aws_lb_target_group: Change `deregistration_delay` from `TypeInt` to `TypeString` ([#31436](https://github.com/hashicorp/terraform-provider-aws/issues/31436)) * resource/aws_dynamodb_table: Fix error when waiting for snapshot to be created ([#34848](https://github.com/hashicorp/terraform-provider-aws/issues/34848)) * resource/aws_lb_target_group: Fix diff on `stickiness.cookie_name` when `stickiness.type` is `lb_cookie` ([#31436](https://github.com/hashicorp/terraform-provider-aws/issues/31436)) +* resource/aws_memorydb_cluster: Treat `snapshotting` status as pending when creating cluster ([#31077](https://github.com/hashicorp/terraform-provider-aws/issues/31077)) ## 5.31.0 (December 15, 2023) From 30b2cd4fadfc642500fd5cb69c852476da1b8aed Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Dec 2023 10:06:12 -0500 Subject: [PATCH 328/438] build(deps): bump golang.org/x/crypto in /.ci/tools (#34978) Bumps 
[golang.org/x/crypto](https://github.com/golang/crypto) from 0.14.0 to 0.17.0. - [Commits](https://github.com/golang/crypto/compare/v0.14.0...v0.17.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .ci/tools/go.mod | 8 ++++---- .ci/tools/go.sum | 16 ++++++++-------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.ci/tools/go.mod b/.ci/tools/go.mod index dc9664cc647..e6339444db8 100644 --- a/.ci/tools/go.mod +++ b/.ci/tools/go.mod @@ -290,16 +290,16 @@ require ( go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.24.0 // indirect - golang.org/x/crypto v0.14.0 // indirect + golang.org/x/crypto v0.17.0 // indirect golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea // indirect golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833 // indirect golang.org/x/mod v0.13.0 // indirect golang.org/x/net v0.17.0 // indirect golang.org/x/oauth2 v0.11.0 // indirect golang.org/x/sync v0.4.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/term v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/term v0.15.0 // indirect + golang.org/x/text v0.14.0 // indirect golang.org/x/tools v0.14.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/api v0.126.0 // indirect diff --git a/.ci/tools/go.sum b/.ci/tools/go.sum index 4ba21e79ed6..38b1e9cf160 100644 --- a/.ci/tools/go.sum +++ b/.ci/tools/go.sum @@ -1202,8 +1202,8 @@ golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod 
h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1468,8 +1468,8 @@ golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1479,8 +1479,8 @@ golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod 
h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1497,8 +1497,8 @@ golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= From f38d0412d0f62a10f0cae8e16ffaf0b961b9981d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 19 Dec 2023 10:07:03 -0500 Subject: [PATCH 329/438] build(deps): bump golang.org/x/crypto in /tools/tfsdk2fw (#34979) Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.15.0 to 
0.17.0. - [Commits](https://github.com/golang/crypto/compare/v0.15.0...v0.17.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-type: indirect ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- tools/tfsdk2fw/go.mod | 241 +++++++++++---------- tools/tfsdk2fw/go.sum | 489 ++++++++++++++++++++++-------------------- 2 files changed, 378 insertions(+), 352 deletions(-) diff --git a/tools/tfsdk2fw/go.mod b/tools/tfsdk2fw/go.mod index 604b1ee21b2..0587beb6a10 100644 --- a/tools/tfsdk2fw/go.mod +++ b/tools/tfsdk2fw/go.mod @@ -3,7 +3,7 @@ module github.com/hashicorp/terraform-provider-aws/tools/tfsdk2fw go 1.20 require ( - github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0 + github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0 github.com/hashicorp/terraform-provider-aws v1.60.1-0.20220322001452-8f7a597d0c24 golang.org/x/exp v0.0.0-20231006140011-7918f672742d ) @@ -17,107 +17,116 @@ require ( github.com/agext/levenshtein v1.2.3 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/armon/go-radix v1.0.0 // indirect - github.com/aws/aws-sdk-go v1.48.4 // indirect - github.com/aws/aws-sdk-go-v2 v1.23.1 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.1 // indirect - github.com/aws/aws-sdk-go-v2/config v1.25.5 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.16.4 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.5 // indirect - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.14.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.4 // indirect - github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.24.0 // indirect - github.com/aws/aws-sdk-go-v2/service/account v1.13.3 // indirect - 
github.com/aws/aws-sdk-go-v2/service/acm v1.21.3 // indirect - github.com/aws/aws-sdk-go-v2/service/appconfig v1.25.3 // indirect - github.com/aws/aws-sdk-go-v2/service/appflow v1.38.3 // indirect - github.com/aws/aws-sdk-go-v2/service/apprunner v1.24.4 // indirect - github.com/aws/aws-sdk-go-v2/service/athena v1.35.1 // indirect - github.com/aws/aws-sdk-go-v2/service/auditmanager v1.29.3 // indirect - github.com/aws/aws-sdk-go-v2/service/bedrock v1.3.3 // indirect - github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.12.3 // indirect - github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.11.4 // indirect - github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.7.2 // indirect - github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.14.4 // indirect - github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.28.0 // indirect - github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.9.1 // indirect - github.com/aws/aws-sdk-go-v2/service/codedeploy v1.20.3 // indirect - github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.20.0 // indirect - github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.19.3 // indirect - github.com/aws/aws-sdk-go-v2/service/comprehend v1.28.2 // indirect - github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.30.0 // indirect - github.com/aws/aws-sdk-go-v2/service/connectcases v1.11.3 // indirect - github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.32.3 // indirect - github.com/aws/aws-sdk-go-v2/service/directoryservice v1.21.3 // indirect - github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.5.3 // indirect - github.com/aws/aws-sdk-go-v2/service/dynamodb v1.25.2 // indirect - github.com/aws/aws-sdk-go-v2/service/ec2 v1.137.1 // indirect - github.com/aws/aws-sdk-go-v2/service/eks v1.34.0 // indirect - github.com/aws/aws-sdk-go-v2/service/emr v1.34.1 // indirect - github.com/aws/aws-sdk-go-v2/service/emrserverless v1.13.4 // indirect - github.com/aws/aws-sdk-go-v2/service/evidently v1.15.3 // indirect - 
github.com/aws/aws-sdk-go-v2/service/finspace v1.17.1 // indirect - github.com/aws/aws-sdk-go-v2/service/fis v1.19.3 // indirect - github.com/aws/aws-sdk-go-v2/service/glacier v1.18.3 // indirect - github.com/aws/aws-sdk-go-v2/service/healthlake v1.19.3 // indirect - github.com/aws/aws-sdk-go-v2/service/iam v1.27.2 // indirect - github.com/aws/aws-sdk-go-v2/service/identitystore v1.20.3 // indirect - github.com/aws/aws-sdk-go-v2/service/inspector2 v1.19.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.8.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.4 // indirect - github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.9.1 // indirect - github.com/aws/aws-sdk-go-v2/service/ivschat v1.9.1 // indirect - github.com/aws/aws-sdk-go-v2/service/kafka v1.27.1 // indirect - github.com/aws/aws-sdk-go-v2/service/kendra v1.46.3 // indirect - github.com/aws/aws-sdk-go-v2/service/keyspaces v1.6.3 // indirect - github.com/aws/aws-sdk-go-v2/service/lambda v1.48.1 // indirect - github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.37.0 // indirect - github.com/aws/aws-sdk-go-v2/service/lightsail v1.31.3 // indirect - github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.23.3 // indirect - github.com/aws/aws-sdk-go-v2/service/medialive v1.41.1 // indirect - github.com/aws/aws-sdk-go-v2/service/mediapackage v1.27.1 // indirect - github.com/aws/aws-sdk-go-v2/service/oam v1.6.3 // indirect - github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.8.3 // indirect - github.com/aws/aws-sdk-go-v2/service/osis v1.5.1 // indirect - github.com/aws/aws-sdk-go-v2/service/pipes v1.8.1 // indirect - github.com/aws/aws-sdk-go-v2/service/pricing v1.23.3 // indirect - 
github.com/aws/aws-sdk-go-v2/service/qldb v1.18.3 // indirect - github.com/aws/aws-sdk-go-v2/service/rbin v1.12.3 // indirect - github.com/aws/aws-sdk-go-v2/service/rds v1.63.2 // indirect - github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.22.3 // indirect - github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.7.2 // indirect - github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.18.3 // indirect - github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.18.3 // indirect - github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.5.3 // indirect - github.com/aws/aws-sdk-go-v2/service/route53domains v1.19.3 // indirect - github.com/aws/aws-sdk-go-v2/service/s3 v1.45.0 // indirect - github.com/aws/aws-sdk-go-v2/service/s3control v1.39.0 // indirect - github.com/aws/aws-sdk-go-v2/service/scheduler v1.5.3 // indirect - github.com/aws/aws-sdk-go-v2/service/securitylake v1.9.3 // indirect - github.com/aws/aws-sdk-go-v2/service/servicequotas v1.18.3 // indirect - github.com/aws/aws-sdk-go-v2/service/sesv2 v1.23.3 // indirect - github.com/aws/aws-sdk-go-v2/service/signer v1.18.4 // indirect - github.com/aws/aws-sdk-go-v2/service/sns v1.25.3 // indirect - github.com/aws/aws-sdk-go-v2/service/sqs v1.28.2 // indirect - github.com/aws/aws-sdk-go-v2/service/ssm v1.43.1 // indirect - github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.19.3 // indirect - github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.26.1 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.17.3 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.20.1 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.25.4 // indirect - github.com/aws/aws-sdk-go-v2/service/swf v1.19.3 // indirect - github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.22.3 // indirect - github.com/aws/aws-sdk-go-v2/service/transcribe v1.32.0 // indirect - github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.6.1 // indirect - github.com/aws/aws-sdk-go-v2/service/vpclattice v1.4.3 // indirect - 
github.com/aws/aws-sdk-go-v2/service/workspaces v1.34.0 // indirect - github.com/aws/aws-sdk-go-v2/service/xray v1.22.3 // indirect - github.com/aws/smithy-go v1.17.0 // indirect + github.com/aws/aws-sdk-go v1.49.4 // indirect + github.com/aws/aws-sdk-go-v2 v1.24.0 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 // indirect + github.com/aws/aws-sdk-go-v2/config v1.26.1 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.16.12 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 // indirect + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.7 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9 // indirect + github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.26.5 // indirect + github.com/aws/aws-sdk-go-v2/service/account v1.14.5 // indirect + github.com/aws/aws-sdk-go-v2/service/acm v1.22.5 // indirect + github.com/aws/aws-sdk-go-v2/service/appconfig v1.26.5 // indirect + github.com/aws/aws-sdk-go-v2/service/appfabric v1.5.5 // indirect + github.com/aws/aws-sdk-go-v2/service/appflow v1.39.5 // indirect + github.com/aws/aws-sdk-go-v2/service/apprunner v1.25.5 // indirect + github.com/aws/aws-sdk-go-v2/service/athena v1.37.3 // indirect + github.com/aws/aws-sdk-go-v2/service/auditmanager v1.30.5 // indirect + github.com/aws/aws-sdk-go-v2/service/bedrock v1.5.5 // indirect + github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.13.5 // indirect + github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.12.5 // indirect + github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.8.5 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.15.5 // indirect + github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.30.0 // indirect + github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.10.5 // 
indirect + github.com/aws/aws-sdk-go-v2/service/codedeploy v1.22.1 // indirect + github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.18.5 // indirect + github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.21.5 // indirect + github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.20.5 // indirect + github.com/aws/aws-sdk-go-v2/service/comprehend v1.29.5 // indirect + github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.31.5 // indirect + github.com/aws/aws-sdk-go-v2/service/connectcases v1.12.5 // indirect + github.com/aws/aws-sdk-go-v2/service/controltower v1.10.6 // indirect + github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.34.5 // indirect + github.com/aws/aws-sdk-go-v2/service/directoryservice v1.22.5 // indirect + github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.6.5 // indirect + github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.6 // indirect + github.com/aws/aws-sdk-go-v2/service/ec2 v1.141.0 // indirect + github.com/aws/aws-sdk-go-v2/service/ecr v1.24.5 // indirect + github.com/aws/aws-sdk-go-v2/service/eks v1.35.5 // indirect + github.com/aws/aws-sdk-go-v2/service/emr v1.35.5 // indirect + github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.6 // indirect + github.com/aws/aws-sdk-go-v2/service/evidently v1.16.5 // indirect + github.com/aws/aws-sdk-go-v2/service/finspace v1.20.0 // indirect + github.com/aws/aws-sdk-go-v2/service/fis v1.21.5 // indirect + github.com/aws/aws-sdk-go-v2/service/glacier v1.19.5 // indirect + github.com/aws/aws-sdk-go-v2/service/healthlake v1.20.5 // indirect + github.com/aws/aws-sdk-go-v2/service/iam v1.28.5 // indirect + github.com/aws/aws-sdk-go-v2/service/identitystore v1.21.5 // indirect + github.com/aws/aws-sdk-go-v2/service/inspector2 v1.20.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.9 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.8.10 
// indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9 // indirect + github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.10.5 // indirect + github.com/aws/aws-sdk-go-v2/service/ivschat v1.10.5 // indirect + github.com/aws/aws-sdk-go-v2/service/kafka v1.28.5 // indirect + github.com/aws/aws-sdk-go-v2/service/kendra v1.47.5 // indirect + github.com/aws/aws-sdk-go-v2/service/keyspaces v1.7.5 // indirect + github.com/aws/aws-sdk-go-v2/service/lambda v1.49.5 // indirect + github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.38.5 // indirect + github.com/aws/aws-sdk-go-v2/service/lightsail v1.32.5 // indirect + github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.25.5 // indirect + github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.24.5 // indirect + github.com/aws/aws-sdk-go-v2/service/medialive v1.43.3 // indirect + github.com/aws/aws-sdk-go-v2/service/mediapackage v1.28.5 // indirect + github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.7.5 // indirect + github.com/aws/aws-sdk-go-v2/service/oam v1.7.5 // indirect + github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.9.5 // indirect + github.com/aws/aws-sdk-go-v2/service/osis v1.6.5 // indirect + github.com/aws/aws-sdk-go-v2/service/pipes v1.9.6 // indirect + github.com/aws/aws-sdk-go-v2/service/polly v1.36.5 // indirect + github.com/aws/aws-sdk-go-v2/service/pricing v1.24.5 // indirect + github.com/aws/aws-sdk-go-v2/service/qldb v1.19.5 // indirect + github.com/aws/aws-sdk-go-v2/service/rbin v1.14.3 // indirect + github.com/aws/aws-sdk-go-v2/service/rds v1.64.6 // indirect + github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.23.5 // indirect + github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.8.5 // indirect + github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.19.5 // indirect + github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.19.5 // indirect + 
github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.6.6 // indirect + github.com/aws/aws-sdk-go-v2/service/route53domains v1.20.5 // indirect + github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5 // indirect + github.com/aws/aws-sdk-go-v2/service/s3control v1.41.5 // indirect + github.com/aws/aws-sdk-go-v2/service/scheduler v1.6.5 // indirect + github.com/aws/aws-sdk-go-v2/service/securityhub v1.44.0 // indirect + github.com/aws/aws-sdk-go-v2/service/securitylake v1.10.5 // indirect + github.com/aws/aws-sdk-go-v2/service/servicequotas v1.19.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sesv2 v1.24.5 // indirect + github.com/aws/aws-sdk-go-v2/service/signer v1.19.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sns v1.26.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sqs v1.29.5 // indirect + github.com/aws/aws-sdk-go-v2/service/ssm v1.44.5 // indirect + github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.20.5 // indirect + github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.27.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.18.5 // indirect + github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.23.5 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.26.5 // indirect + github.com/aws/aws-sdk-go-v2/service/swf v1.20.5 // indirect + github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.23.6 // indirect + github.com/aws/aws-sdk-go-v2/service/transcribe v1.34.5 // indirect + github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.8.3 // indirect + github.com/aws/aws-sdk-go-v2/service/vpclattice v1.5.5 // indirect + github.com/aws/aws-sdk-go-v2/service/workspaces v1.35.6 // indirect + github.com/aws/aws-sdk-go-v2/service/xray v1.23.5 // indirect + github.com/aws/smithy-go v1.19.0 // indirect github.com/beevik/etree v1.2.0 // indirect github.com/bgentry/speakeasy v0.1.0 // indirect github.com/cloudflare/circl v1.3.3 // indirect @@ -130,8 +139,8 @@ require ( 
github.com/google/go-cmp v0.6.0 // indirect github.com/google/uuid v1.3.1 // indirect github.com/hashicorp/aws-cloudformation-resource-schema-sdk-go v0.21.0 // indirect - github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.42 // indirect - github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.43 // indirect + github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.45 // indirect + github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.46 // indirect github.com/hashicorp/awspolicyequivalence v1.6.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect @@ -139,21 +148,21 @@ require ( github.com/hashicorp/go-cty v1.4.1-0.20200723130312-85980079f637 // indirect github.com/hashicorp/go-hclog v1.5.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect - github.com/hashicorp/go-plugin v1.5.2 // indirect + github.com/hashicorp/go-plugin v1.6.0 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect github.com/hashicorp/go-version v1.6.0 // indirect - github.com/hashicorp/hc-install v0.6.1 // indirect + github.com/hashicorp/hc-install v0.6.2 // indirect github.com/hashicorp/hcl/v2 v2.19.1 // indirect github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.19.0 // indirect - github.com/hashicorp/terraform-json v0.17.1 // indirect + github.com/hashicorp/terraform-json v0.18.0 // indirect github.com/hashicorp/terraform-plugin-framework v1.4.2 // indirect github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 // indirect github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 // indirect - github.com/hashicorp/terraform-plugin-go v0.19.1 // indirect + github.com/hashicorp/terraform-plugin-go v0.20.0 // indirect github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect - github.com/hashicorp/terraform-plugin-mux v0.12.0 // indirect - github.com/hashicorp/terraform-plugin-testing v1.5.1 // indirect + 
github.com/hashicorp/terraform-plugin-mux v0.13.0 // indirect + github.com/hashicorp/terraform-plugin-testing v1.6.0 // indirect github.com/hashicorp/terraform-registry-address v0.2.3 // indirect github.com/hashicorp/terraform-svchost v0.1.1 // indirect github.com/hashicorp/yamux v0.1.1 // indirect @@ -186,14 +195,14 @@ require ( go.opentelemetry.io/otel v1.21.0 // indirect go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/otel/trace v1.21.0 // indirect - golang.org/x/crypto v0.15.0 // indirect - golang.org/x/mod v0.13.0 // indirect - golang.org/x/net v0.18.0 // indirect - golang.org/x/sys v0.14.0 // indirect + golang.org/x/crypto v0.17.0 // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/net v0.19.0 // indirect + golang.org/x/sys v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect - google.golang.org/grpc v1.59.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect + google.golang.org/grpc v1.60.0 // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/tools/tfsdk2fw/go.sum b/tools/tfsdk2fw/go.sum index eff9bbf650f..3413ef7408b 100644 --- a/tools/tfsdk2fw/go.sum +++ b/tools/tfsdk2fw/go.sum @@ -13,7 +13,6 @@ github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c h1:kMFnB0vCcX github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= github.com/YakDriver/regexache v0.23.0 h1:kv3j4XKhbx/vqUilSBgizXDUXHvvH1KdYekdmGwz4C4= github.com/YakDriver/regexache v0.23.0/go.mod h1:K4BZ3MYKAqSFbYWqmbsG+OzYUDyJjnMEr27DJEsVG3U= -github.com/acomagu/bufpipe v1.0.4 h1:e3H4WUzM3npvo5uv95QuJM3cQspFNtFBzvJ2oNjKIDQ= github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= github.com/agext/levenshtein 
v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= @@ -22,208 +21,226 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmms github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go v1.48.4 h1:HS2L7ynVhkcRrQRro9CLJZ/xLRb4UOzDEfPzgevZwXM= -github.com/aws/aws-sdk-go v1.48.4/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= -github.com/aws/aws-sdk-go-v2 v1.23.1 h1:qXaFsOOMA+HsZtX8WoCa+gJnbyW7qyFFBlPqvTSzbaI= -github.com/aws/aws-sdk-go-v2 v1.23.1/go.mod h1:i1XDttT4rnf6vxc9AuskLc6s7XBee8rlLilKlc03uAA= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.1 h1:ZY3108YtBNq96jNZTICHxN1gSBSbnvIdYwwqnvCV4Mc= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.1/go.mod h1:t8PYl/6LzdAqsU4/9tz28V/kU+asFePvpOMkdul0gEQ= -github.com/aws/aws-sdk-go-v2/config v1.25.5 h1:UGKm9hpQS2hoK8CEJ1BzAW8NbUpvwDJJ4lyqXSzu8bk= -github.com/aws/aws-sdk-go-v2/config v1.25.5/go.mod h1:Bf4gDvy4ZcFIK0rqDu1wp9wrubNba2DojiPB2rt6nvI= -github.com/aws/aws-sdk-go-v2/credentials v1.16.4 h1:i7UQYYDSJrtc30RSwJwfBKwLFNnBTiICqAJ0pPdum8E= -github.com/aws/aws-sdk-go-v2/credentials v1.16.4/go.mod h1:Kdh/okh+//vQ/AjEt81CjvkTo64+/zIE4OewP7RpfXk= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.5 h1:KehRNiVzIfAcj6gw98zotVbb/K67taJE0fkfgM6vzqU= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.5/go.mod h1:VhnExhw6uXy9QzetvpXDolo1/hjhx4u9qukBGkuUwjs= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.14.3 h1:edTeIcLVO/gefaQ4VdKeFaI4ygdSZ7s/eCWYo0kBxAc= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.14.3/go.mod h1:3rp61zCDi1E//0vHdx2ULc5eLlo0JOqVd1hxmks6S84= 
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.4 h1:LAm3Ycm9HJfbSCd5I+wqC2S9Ej7FPrgr5CQoOljJZcE= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.4/go.mod h1:xEhvbJcyUf/31yfGSQBe01fukXwXJ0gxDp7rLfymWE0= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.4 h1:4GV0kKZzUxiWxSVpn/9gwR0g21NF1Jsyduzo9rHgC/Q= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.4/go.mod h1:dYvTNAggxDZy6y1AF7YDwXsPuHFy/VNEpEI/2dWK9IU= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1 h1:uR9lXYjdPX0xY+NhvaJ4dD8rpSRz5VY81ccIIoNG+lw= -github.com/aws/aws-sdk-go-v2/internal/ini v1.7.1/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.4 h1:40Q4X5ebZruRtknEZH/bg91sT5pR853F7/1X9QRbI54= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.4/go.mod h1:u77N7eEECzUv7F0xl2gcfK/vzc8wcjWobpy+DcrLJ5E= -github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.24.0 h1:R7phBXqQe58xgGuoI443zqIqLH0py/dmfTBO7WTehec= -github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.24.0/go.mod h1:/FOvyPLvQZXrl3vXWeZ0h2NNjJDisqbqDtLiVjke6zQ= -github.com/aws/aws-sdk-go-v2/service/account v1.13.3 h1:KF3N6GZ+iKMFXd+vlcBS98HaVbXGyqE4Gw17tbcDpQQ= -github.com/aws/aws-sdk-go-v2/service/account v1.13.3/go.mod h1:vrBsD4qqLoj0NmuYQcfSRWgkN6QM/0ufy2DD+48cuEE= -github.com/aws/aws-sdk-go-v2/service/acm v1.21.3 h1:C6ckTRBfnKJSoKoAtaGWavXRd3Ab4FW+BWQbFRrA+xw= -github.com/aws/aws-sdk-go-v2/service/acm v1.21.3/go.mod h1:YUgKNkePKTcwkkDhRAOuzxvesc+r+zKAPlkXft51jpw= -github.com/aws/aws-sdk-go-v2/service/appconfig v1.25.3 h1:68bxzI7wWa1nJT+N7AR/r01aKQb0KsD0WhEcMNv6jSg= -github.com/aws/aws-sdk-go-v2/service/appconfig v1.25.3/go.mod h1:YO0HH+oQubpBQiAC4fWja3B43K4YTHznuaXEGe0rauY= -github.com/aws/aws-sdk-go-v2/service/appflow v1.38.3 h1:hoz+OCM2E0y4SXllN2IFgCTOH+0VPYym2Iqxk/ttuw4= -github.com/aws/aws-sdk-go-v2/service/appflow v1.38.3/go.mod h1:9X+vMl265vQZozYKOXdqsvY5ZaiL9T3KPQLOkFBYFLc= -github.com/aws/aws-sdk-go-v2/service/apprunner v1.24.4 
h1:PgmMPDrOO5uIngULC2A+P0BUJssu/rZ1e2UzAS++Zs4= -github.com/aws/aws-sdk-go-v2/service/apprunner v1.24.4/go.mod h1:ecWPZOErcCqw4mHgt4W3tSFTEH6vGV4IV6qNKmY/BMQ= -github.com/aws/aws-sdk-go-v2/service/athena v1.35.1 h1:oOucUl/I9XOnIb+L9ZBY2gyQmPk389qRGmuam72EWtI= -github.com/aws/aws-sdk-go-v2/service/athena v1.35.1/go.mod h1:CAh4Navj8gOz4Y12wWUleeqY5fKkAaT80ZWo23Uz+YI= -github.com/aws/aws-sdk-go-v2/service/auditmanager v1.29.3 h1:VfmZ6+gp3Vifn0PDazz49Vll/OcQofnS85nBUrXg2Ak= -github.com/aws/aws-sdk-go-v2/service/auditmanager v1.29.3/go.mod h1:fn0+9GBLXIQ0T4SAhPBEAMtQhm7ZOwm8McfkDux25Ww= -github.com/aws/aws-sdk-go-v2/service/bedrock v1.3.3 h1:h6+XbxwHszWSpuO4Ldmn3vA0epaLXZEr54lN80IW0G4= -github.com/aws/aws-sdk-go-v2/service/bedrock v1.3.3/go.mod h1:fkGjFqdw1DkTgAm7EmJFiIHYc4zlK8w28WLSQ6h2Puc= -github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.12.3 h1:tEVf24GFbnT8quIRKV/whxu5BOlPO6UlK4sgKvdxGis= -github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.12.3/go.mod h1:dcIkt/2tEiw290q2VT6Fb9zGhsm5FPLjW4XiMA/j0v4= -github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.11.4 h1:k3NDH46qZo8LJcdfRLEZSjVgQ3IETIn8LKWXQLf1de0= -github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.11.4/go.mod h1:6zlssa2FjPomAAoo10fxfr9pNnAiwRGsomO779RcI10= -github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.7.2 h1:c0uJZpFl9hX54vwgzu+BxBzbs4CRLv8LaLWDs5rdmvQ= -github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.7.2/go.mod h1:QolOgVOAGbv40zS4dBszQ0uTOuHp9LYqtXmahlIS9zY= -github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.14.4 h1:gw4uRMA2hMWRBfJdag1jHqeHrd9BcWcREjXtvXNJt+w= -github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.14.4/go.mod h1:JtAggGdgIizSHGyonVSlXWYq/qnvmpkJKsacVaorSII= -github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.28.0 h1:7XDP8uP3hsQboGcZ7f6tNAdYIKWRCjmeLx1sRKJo+jY= -github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.28.0/go.mod h1:NRP65i31tm0UhGwc9j6TGwk7dMs1ZDprZPIHfr+gHCU= -github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.9.1 
h1:7uCeSCXVevOZxPgVp4rLmfRI/p7CGpHHUIO5k2wQYPU= -github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.9.1/go.mod h1:qvueIFuOQR/uiXrNkxaYNBmoETYwcwBxx6m3ymTxeIw= -github.com/aws/aws-sdk-go-v2/service/codedeploy v1.20.3 h1:rGqIKTmugpZ7lEzXTmbiPg45Id09UQpB2YoGaE0J6T4= -github.com/aws/aws-sdk-go-v2/service/codedeploy v1.20.3/go.mod h1:A7i1lQClkFz09enKv5WYKb8a2lf9QeeI1s9dNiym3hg= -github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.20.0 h1:rnUEqvW37avlcBYpEhCdNCiD7iFy9svbKmBxSPWTjgA= -github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.20.0/go.mod h1:SP9GRVKaf1KBmiBAabqfalozQhCVBmMdUmBOQ7vqemM= -github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.19.3 h1:CKbMK3mFx9i83yRx/lIAaUhnwGsoNK41lZkZRBOiHdg= -github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.19.3/go.mod h1:Jn4GBTLbIs8g10uxXvj3bidyg2LhmH5HTcWEuPj35E8= -github.com/aws/aws-sdk-go-v2/service/comprehend v1.28.2 h1:dKDJPoagCLBuoFiFwvstKLDtrdAOO+aVsu+tyOG8EgA= -github.com/aws/aws-sdk-go-v2/service/comprehend v1.28.2/go.mod h1:xoWcb1SjG3Tp7L2kjAa/gY+cyIYKsqRORXPEf90JRi0= -github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.30.0 h1:BKLH9SIyRHJG3+tPy2TFZqILS9xg0wmi4zNNeSe6RmE= -github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.30.0/go.mod h1:z2ipEqlHBaieu+POGuEptwkKOBhumxLZjeO7iAJqjuk= -github.com/aws/aws-sdk-go-v2/service/connectcases v1.11.3 h1:ONDuYy0oFEwNvmO3kBu3JLYp9BJsxVZCLv0+L5fIArA= -github.com/aws/aws-sdk-go-v2/service/connectcases v1.11.3/go.mod h1:K//ScfyKrxpEpC5VSZesWjdYtdJXCj8sCau9iSsYdMk= -github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.32.3 h1:GXyai+Ti0BoxNfMotRFDYywCGlTV5IzXMGyG4RV6oq4= -github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.32.3/go.mod h1:ALbeVeoYNXakmRHRZBIpAVRXXlesm6v5jH1037hVkac= -github.com/aws/aws-sdk-go-v2/service/directoryservice v1.21.3 h1:/cMlzdpSJjjwUzBDuIeazKQtYB2VDOqCSV3zq1sDpzg= -github.com/aws/aws-sdk-go-v2/service/directoryservice v1.21.3/go.mod h1:VgWOVWnnWT44FRq/UxU67d4QQ+MaQZTw1UYcztDPkgI= 
-github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.5.3 h1:mI1Jgmduvk6yfuIziQg+j2mF/cNMt7QiGastEZr7mc0= -github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.5.3/go.mod h1:KsuFE2UcXWEw0AKVT/9JKtBUE/C+v6CAR4KMxe0MkCA= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.25.2 h1:O6ff5PwwgQ7QkL/XA0H+0U0mWwjkYaP9tHvbr0Ptqak= -github.com/aws/aws-sdk-go-v2/service/dynamodb v1.25.2/go.mod h1:kuVxCbsxbP/h6YTT2BfOj4s/bwXYsG3C/8Qn9gO5QJY= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.137.1 h1:J/N4ydefXQZIwKBDPtvrhxrIuP/vaaYKnAsy3bKVIvU= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.137.1/go.mod h1:hrBzQzlQQRmiaeYRQPr0SdSx6fdqP+5YcGhb97LCt8M= -github.com/aws/aws-sdk-go-v2/service/eks v1.34.0 h1:g3m365rWn0MLZagA77BSuQAzTqG8VB+azzCVtpmgnpg= -github.com/aws/aws-sdk-go-v2/service/eks v1.34.0/go.mod h1:DInudKNZjEy7SJ0KfRh4VxaqY04B52Lq2+QRuvObfNQ= -github.com/aws/aws-sdk-go-v2/service/emr v1.34.1 h1:AkAncVuiOap3LS/kLs1QdVDY9LZyYHOMzCRybLtlxJU= -github.com/aws/aws-sdk-go-v2/service/emr v1.34.1/go.mod h1:HCZK6jCgxuYABdZFqiiHE3WT2tvTFVjefMB3ZX9dOxA= -github.com/aws/aws-sdk-go-v2/service/emrserverless v1.13.4 h1:V5unlj0Ky5ZuvLkndDQlHbYe3vshVJwavUZ7aGne8oo= -github.com/aws/aws-sdk-go-v2/service/emrserverless v1.13.4/go.mod h1:EC2zElGORkIzy2vPQV5U1qNGXIDzZHjL/KWbpQD6nc0= -github.com/aws/aws-sdk-go-v2/service/evidently v1.15.3 h1:guFZcKDSE3NoijLu40GeMIs0TXIMdXGCZFEdprOMkcs= -github.com/aws/aws-sdk-go-v2/service/evidently v1.15.3/go.mod h1:59okKGh8LvG0ZhKMZMCYU++ZendXUT6dqzCufBzfzq4= -github.com/aws/aws-sdk-go-v2/service/finspace v1.17.1 h1:GcBPj8B8EiVaHOba8Pq+CS+7w7s3q8ndETwPtpleS7A= -github.com/aws/aws-sdk-go-v2/service/finspace v1.17.1/go.mod h1:waL57h/Nj7KR0Z6HvjdeJrHS7o8E/uQ33YgNQcLcrYE= -github.com/aws/aws-sdk-go-v2/service/fis v1.19.3 h1:u6/LY1KBbBazaZZGT6NPUyUurwV6e7Fg+guLOVgeI9M= -github.com/aws/aws-sdk-go-v2/service/fis v1.19.3/go.mod h1:b2RZoRyiPnFQtc+qSUmbrUrCEaeKWrkv2J/BBfNVOKM= -github.com/aws/aws-sdk-go-v2/service/glacier v1.18.3 h1:Iu/EIeu9oSzd8A5MkN4s9YoiQUHQWFqa9ih/ek8LiZI= 
-github.com/aws/aws-sdk-go-v2/service/glacier v1.18.3/go.mod h1:xr3tDSCenzU+9BW69R+UrLy0/ZN/cAp+j6VeuESkZDA= -github.com/aws/aws-sdk-go-v2/service/healthlake v1.19.3 h1:6Jhor5f7I+G1LGvktQ2HSbis5cD1xkKXt/pk0ppSCnQ= -github.com/aws/aws-sdk-go-v2/service/healthlake v1.19.3/go.mod h1:I/XTw/9Uob9jlq7V8mtpw+o+XILvMmsWd+IC0B4kG0s= -github.com/aws/aws-sdk-go-v2/service/iam v1.27.2 h1:Z3a5I5kKGsuVW4kbrtHVnLGUHpEpo19zFyo6dzP2WCM= -github.com/aws/aws-sdk-go-v2/service/iam v1.27.2/go.mod h1:CYRyr95Q57xVvrcKJu3vw4jVVCZhmY1SyugM+EWXlzI= -github.com/aws/aws-sdk-go-v2/service/identitystore v1.20.3 h1:aDlvcjQNHE7xzdn3g9vcMr/0+IzjjYjKEqj4ua2tIBc= -github.com/aws/aws-sdk-go-v2/service/identitystore v1.20.3/go.mod h1:aN6ZOx4l7hRGw2YecqbRluRI9QvX8EoaKrcnKI+guUA= -github.com/aws/aws-sdk-go-v2/service/inspector2 v1.19.3 h1:VSL0Us1D65b3zTXV/YA/Ibr7Uet6vwoyMdIeZ0dgOJM= -github.com/aws/aws-sdk-go-v2/service/inspector2 v1.19.3/go.mod h1:WmUN3IF05j8rS6DxNmszBCq43wxgVOAtdt4MzsxGYOY= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1 h1:rpkF4n0CyFcrJUG/rNNohoTmhtWlFTRI4BsZOh9PvLs= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.1/go.mod h1:l9ymW25HOqymeU2m1gbUQ3rUIsTwKs8gYHXkqDQUhiI= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.4 h1:6DRKQc+9cChgzL5gplRGusI5dBGeiEod4m/pmGbcX48= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.4/go.mod h1:s8ORvrW4g4v7IvYKIAoBg17w3GQ+XuwXDXYrQ5SkzU0= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.8.4 h1:yUrVjtoH+5aA7h8qFVvVOBv03K5XIcgR3r1y1lH5raw= -github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.8.4/go.mod h1:g10w17faXf5sqTZt8+Bu/9PIUopwgcYZDb9jvsl8M9E= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.4 h1:rdovz3rEu0vZKbzoMYPTehp0E8veoE9AyfzqCr5Eeao= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.4/go.mod h1:aYCGNjyUCUelhofxlZyj63srdxWUSsBSGg5l6MCuXuE= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.4 
h1:o3DcfCxGDIT20pTbVKVhp3vWXOj/VvgazNJvumWeYW0= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.4/go.mod h1:Uy0KVOxuTK2ne+/PKQ+VvEeWmjMMksE17k/2RK/r5oM= -github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.9.1 h1:R09EQdFVDRSi4voYiKIv8Ex5piYAa2eS5MghKdyDQ3Y= -github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.9.1/go.mod h1:2Z0VGwjWQ3fQc2a2JrpQb8dLjb9x8XggJBj825md6Nc= -github.com/aws/aws-sdk-go-v2/service/ivschat v1.9.1 h1:C4BVRMgVNn+iE/SJFSZsZyNCdZi2JIR7VETTNfAjiQo= -github.com/aws/aws-sdk-go-v2/service/ivschat v1.9.1/go.mod h1:+TK56cWC6euHLTbHN+Pi1+0it0WZ68lW4bgoEANPBRI= -github.com/aws/aws-sdk-go-v2/service/kafka v1.27.1 h1:bIBOmRFbwJGLAf+9vyqQoIiIiGjQmHd4C8Vxpd6xKDE= -github.com/aws/aws-sdk-go-v2/service/kafka v1.27.1/go.mod h1:YtnT/LZDxHOx7f5wW5crUU+HRqv8TOcNoiJJINxUb28= -github.com/aws/aws-sdk-go-v2/service/kendra v1.46.3 h1:yHWYW6TzVoFDCJRd0vmIJh7SmmARXEOhuvT6tegLxw0= -github.com/aws/aws-sdk-go-v2/service/kendra v1.46.3/go.mod h1:OcD+ecexkQUPImlJnLiCjCSUHEj5wKlGaY5rw7Dqr74= -github.com/aws/aws-sdk-go-v2/service/keyspaces v1.6.3 h1:SCVE+sAeGYkH32mqkkLGYUPeaMCgnPRhMFRnFSqOiXA= -github.com/aws/aws-sdk-go-v2/service/keyspaces v1.6.3/go.mod h1:+8c1AEbPhx8+j9dnIw5QBMyH4ITK6Eq3Z85TfSSQLas= -github.com/aws/aws-sdk-go-v2/service/lambda v1.48.1 h1:xVOzP4rFi0kMXUQozqInP+Yy6zldr8WTpHeVEqxMtOY= -github.com/aws/aws-sdk-go-v2/service/lambda v1.48.1/go.mod h1:7dj5Kak6A6QOeZxUgIDUWVG5+7upeEBY1ivtFDRLxSQ= -github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.37.0 h1:tQ/cAR16Rk2UkffPPv44BS8/T9oJlcccB3qP6Rp+cAw= -github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.37.0/go.mod h1:TpH2EO5YbvByV/uEr2frH9JwlC0XS8Pf1cDzOFbXSE4= -github.com/aws/aws-sdk-go-v2/service/lightsail v1.31.3 h1:6khO1ztTJrkerOTUh4UiVElQ8kJqeBMRkCjh2D4oRgs= -github.com/aws/aws-sdk-go-v2/service/lightsail v1.31.3/go.mod h1:8e96Pfkyy2KVGlz7Dv7QrrCTNyLMjRL9LVUqNGyX7JA= -github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.23.3 h1:Klf+liB8OrhhQyVT+TdxQqDRwTW0Xgvnv+Da8wi/VEs= 
-github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.23.3/go.mod h1:6qyNNp9IBCvpOfb9S4kiZ9k0DaAyajpAnN7KP1uDmuA= -github.com/aws/aws-sdk-go-v2/service/medialive v1.41.1 h1:yarqPw/2q52gqfxYoBSPPFHv+ccwJm/ajEYK1eMRInM= -github.com/aws/aws-sdk-go-v2/service/medialive v1.41.1/go.mod h1:yji6+auCVOD6Olje6cNWn4IMJBCliOWh+60ANZa6UHI= -github.com/aws/aws-sdk-go-v2/service/mediapackage v1.27.1 h1:ZDgB+Ky0hXvLwku1kzZD7w4fLvlOridvFLuF2/4v+CA= -github.com/aws/aws-sdk-go-v2/service/mediapackage v1.27.1/go.mod h1:feuXf9Ltxnf6Re39qLdyC1Sn/bIXcAFUXyIB6hEpYbw= -github.com/aws/aws-sdk-go-v2/service/oam v1.6.3 h1:S3GlOlpESh8BCOAiFc2DDXlM1bCNnZG7xL6Z4Xz4V3g= -github.com/aws/aws-sdk-go-v2/service/oam v1.6.3/go.mod h1:pfLMa7wlMAGH4/BfAvs7T7Hx+eUnfOH6I13yEq+nmn0= -github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.8.3 h1:rYx9F+RnIIVZklBHyvv34tVvMBZPex2rQGbiyjYW4MU= -github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.8.3/go.mod h1:VePNBj+se/QYQpH8RTKdFx83iRMAKUev9JyOiUs8Wr4= -github.com/aws/aws-sdk-go-v2/service/osis v1.5.1 h1:anUrguxdcgTyLn/PtBvS8fvFWgaT7YnQJRarH6713FM= -github.com/aws/aws-sdk-go-v2/service/osis v1.5.1/go.mod h1:R9REkyNcMzOf8Btlgj79OcK0aMlpOW+riOwASOXr9VE= -github.com/aws/aws-sdk-go-v2/service/pipes v1.8.1 h1:aaGRX6RLKYO31cqoVuMG/XU4W/Dmnfy4H8V+gr7R28c= -github.com/aws/aws-sdk-go-v2/service/pipes v1.8.1/go.mod h1:hcr1CUx5Fk5ABqk93GlkbeRwl4mhm3cbd04SAfKw+uQ= -github.com/aws/aws-sdk-go-v2/service/pricing v1.23.3 h1:/fGaDu3SL5WeUypBfyEMA8QF0K+geeWS2v8OggmSfyE= -github.com/aws/aws-sdk-go-v2/service/pricing v1.23.3/go.mod h1:0jlJ64Pv4CU8NhD7auHlPFL6q8KR9y4aUsuxYS0ge1o= -github.com/aws/aws-sdk-go-v2/service/qldb v1.18.3 h1:LAtKPkKCtJpfYbv8KjC4BrhE0B3cudPyysbloic5s3U= -github.com/aws/aws-sdk-go-v2/service/qldb v1.18.3/go.mod h1:0SJbLjtz0rdXUPw2VgCYnFRoYdxjT69um3+/Jx2qjNs= -github.com/aws/aws-sdk-go-v2/service/rbin v1.12.3 h1:4TW1hRKwUz9KLSh7gQjw22fGueVvzU+OLiQf1nD++4U= -github.com/aws/aws-sdk-go-v2/service/rbin v1.12.3/go.mod 
h1:3ek97n3CuKKzfu8mMc+FXvc+rtRZXbYQIMzSFlIpVvk= -github.com/aws/aws-sdk-go-v2/service/rds v1.63.2 h1:tPpW4BtS6RYeMavI7jVNdiopvcVgv8JQzLYBxRFI2v8= -github.com/aws/aws-sdk-go-v2/service/rds v1.63.2/go.mod h1:wOD+/saE3LEwnwlaq5EHyj7yWYwz3COo0IOXAt7bAL4= -github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.22.3 h1:q1h5SxtCSz7lQ7eiKfBd6Fuy+ynoWw3JDcSKgcpoGgc= -github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.22.3/go.mod h1:T5fhDCo61UBeegUKVKwdAoOb0oyRpP4iK6+xbuSYcEs= -github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.7.2 h1:UbpNH/Cp7HIhFzvNbqFtFNvokqhl3xVcTiosKATxMrk= -github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.7.2/go.mod h1:o5OQlj9QiP4xHo8GucEWlfoOhWHTyS4+PQsPrwAj5Qk= -github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.18.3 h1:2G3B2UKKHsjtupjdbtTF5fY+PjPPnNJCgmHeIzIpF8c= -github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.18.3/go.mod h1:PbEKPOynMRNZQyA2OwTP6Srh77JarKVkx/ROU6Q6KE8= -github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.18.3 h1:TGaUBkC4l6XRfbEGs4kAZ07MTt2d6P6vYOaUa3Xlkas= -github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.18.3/go.mod h1:2KWwhq6dij2weFBZxQlCzalcKk7TJc3MZJyZS1hd77s= -github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.5.3 h1:3vBL9PcLYWQb+8DePzYvHBnKzvFNxVosVGNTL6HgFM0= -github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.5.3/go.mod h1:Ych4aIodWQ6aJxl/8BFDnF4p52CQUK8pJxtE18PWsXs= -github.com/aws/aws-sdk-go-v2/service/route53domains v1.19.3 h1:bE+M7fO3XpUjcyFEjhcLkGC+f3/K3HyrQOgiUnlwgDc= -github.com/aws/aws-sdk-go-v2/service/route53domains v1.19.3/go.mod h1:5yfMeHnHhHK8WpajhjfhxpJteaxW045jdSsK2eH32Pc= -github.com/aws/aws-sdk-go-v2/service/s3 v1.45.0 h1:qm5f24B6bg3BsVdbMd8ODEfKeadBmYlwUi9erqRfv6s= -github.com/aws/aws-sdk-go-v2/service/s3 v1.45.0/go.mod h1:dqJ5JBL0clzgHriH35Amx3LRFY6wNIPUX7QO/BerSBo= -github.com/aws/aws-sdk-go-v2/service/s3control v1.39.0 h1:j+RKem2TrXOjyKMrEOZBXn9XNmUG2Qecxl/cD0bjz9g= -github.com/aws/aws-sdk-go-v2/service/s3control v1.39.0/go.mod 
h1:A2vCti/i+W0KkUwDAY3jio5QpuS/tk4jhmBaTDoZ1aY= -github.com/aws/aws-sdk-go-v2/service/scheduler v1.5.3 h1:XPNfnBEZ0uIyYQ6XrYceD+BlaC1LEyjntRYn2a8dW/8= -github.com/aws/aws-sdk-go-v2/service/scheduler v1.5.3/go.mod h1:0BCVZduXl+TWsNMMVEhMSv+0HmVxgHkMkMtZFKEh2Xw= -github.com/aws/aws-sdk-go-v2/service/securitylake v1.9.3 h1:cFfqbyJ/LQ3lhx11tMBSx1v3I8xG+tsRXekaPn5iRrs= -github.com/aws/aws-sdk-go-v2/service/securitylake v1.9.3/go.mod h1:1dGVAKs1OwVEfZ53xy1gjcg/ojlN5DAKsD51TEpyWSs= -github.com/aws/aws-sdk-go-v2/service/servicequotas v1.18.3 h1:BEdvuuHquu0W5HTxnQKwrbc/ztc7jXH8xWrDRKDwsbE= -github.com/aws/aws-sdk-go-v2/service/servicequotas v1.18.3/go.mod h1:mAtKs5EUzehP/G5cxn7HYzu5L0Q9hCLUFDaebiy2jY0= -github.com/aws/aws-sdk-go-v2/service/sesv2 v1.23.3 h1:yriEnS4d22skDS16ULX8aHZ3/4xu6MFRjxjxax3EdJ4= -github.com/aws/aws-sdk-go-v2/service/sesv2 v1.23.3/go.mod h1:6xrtDvADN0Pt9SSwwQtYUQv2ZGwoIW/tMDG2Dgj1v0w= -github.com/aws/aws-sdk-go-v2/service/signer v1.18.4 h1:O8jBbjDHTCid5wktWNuq1Ujz4osVvCAUu/0zja2JDO0= -github.com/aws/aws-sdk-go-v2/service/signer v1.18.4/go.mod h1:Pzl9358kaC568WlbXl28y+6cc6RP9Hyt6LhgN8qh9fU= -github.com/aws/aws-sdk-go-v2/service/sns v1.25.3 h1:6/Esm0BnUNrx+yy8AaslbaeJa8V40tTJ9N+tOihYWVo= -github.com/aws/aws-sdk-go-v2/service/sns v1.25.3/go.mod h1:GkPiLToDWySwNSsR4AVam/Sv8UAZuMlGe9dozvyRCPE= -github.com/aws/aws-sdk-go-v2/service/sqs v1.28.2 h1:MVg4eLi9uM1+YHYSfcCg1CR3mqtL6UJ9SF3VrMxKmUE= -github.com/aws/aws-sdk-go-v2/service/sqs v1.28.2/go.mod h1:7vHhhnzSGZcquR6+X7V+wDHdY8iOk5ge0z+FxoxkvJw= -github.com/aws/aws-sdk-go-v2/service/ssm v1.43.1 h1:QCZGFHZnzP0yRveI5X+5Cu54wdvpbgiuF3Qy3xBykyA= -github.com/aws/aws-sdk-go-v2/service/ssm v1.43.1/go.mod h1:Iw3+XCa7ARZWsPiV3Zozf5Hb3gD7pHDLKu9Xcc4iwDM= -github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.19.3 h1:y31W273p64rg4jL6ZffrcS1VaDeJfs3lYedtP1vKQiU= -github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.19.3/go.mod h1:N7TZq4pO6aVgr+28vdzI+YoaxdwvnE9XyAWEVk4HLS4= -github.com/aws/aws-sdk-go-v2/service/ssmincidents 
v1.26.1 h1:PUtIIchHXzq7NcpBAMnglcD5Vvh3BB6rkSdhfxAdVZ4= -github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.26.1/go.mod h1:ZYp2WYPUHVvCgUOkBqJO8bCCjO9lyQzunkJjkoMmsFw= -github.com/aws/aws-sdk-go-v2/service/sso v1.17.3 h1:CdsSOGlFF3Pn+koXOIpTtvX7st0IuGsZ8kJqcWMlX54= -github.com/aws/aws-sdk-go-v2/service/sso v1.17.3/go.mod h1:oA6VjNsLll2eVuUoF2D+CMyORgNzPEW/3PyUdq6WQjI= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.20.1 h1:cbRqFTVnJV+KRpwFl76GJdIZJKKCdTPnjUZ7uWh3pIU= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.20.1/go.mod h1:hHL974p5auvXlZPIjJTblXJpbkfK4klBczlsEaMCGVY= -github.com/aws/aws-sdk-go-v2/service/sts v1.25.4 h1:yEvZ4neOQ/KpUqyR+X0ycUTW/kVRNR4nDZ38wStHGAA= -github.com/aws/aws-sdk-go-v2/service/sts v1.25.4/go.mod h1:feTnm2Tk/pJxdX+eooEsxvlvTWBvDm6CasRZ+JOs2IY= -github.com/aws/aws-sdk-go-v2/service/swf v1.19.3 h1:V8QA8sb+ov6zVwrYUtvALq+dTMSy8EF3qRcRvN7SQlY= -github.com/aws/aws-sdk-go-v2/service/swf v1.19.3/go.mod h1:fDcgEmY/qcPgXVXAKoPbDCuO0/w0mrdQot7e/ZcCDFk= -github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.22.3 h1:UiXy4+zKKNyojUWCm503oZKVhv5o31FKqXVnXP+7a8A= -github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.22.3/go.mod h1:RbPi0d7mbqDCVbTGx7RZAk18foBWl/8Xr9lKQycwayM= -github.com/aws/aws-sdk-go-v2/service/transcribe v1.32.0 h1:8zGiB2EW+vk1mjg2q/hJU/E6+YMOOl4WpezRPUXo6pQ= -github.com/aws/aws-sdk-go-v2/service/transcribe v1.32.0/go.mod h1:0y+iVkE1SHO0+Athq1sQGstKIwl+HNXVRPro21cXQuo= -github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.6.1 h1:VMpHz+mUTAlL67JgDCr2dc0InZFlrfPL/yM2Y6BmbU8= -github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.6.1/go.mod h1:xNNgvDTEbZ9A/38LCQrtGCmlQ7aRGjGwVsIoGXVTESA= -github.com/aws/aws-sdk-go-v2/service/vpclattice v1.4.3 h1:WD7x9MEgzwOD4V6iJF442kxrUTjU0MoUjqFhJRO1AXE= -github.com/aws/aws-sdk-go-v2/service/vpclattice v1.4.3/go.mod h1:JECSzkP4PNPWrvUcv9aQDdadFqUrOeERA6k1U6DG5ms= -github.com/aws/aws-sdk-go-v2/service/workspaces v1.34.0 h1:BzFQiEXnhsFolEPOKacnxGolyYqN5y0GGZrVQinoxQA= 
-github.com/aws/aws-sdk-go-v2/service/workspaces v1.34.0/go.mod h1:lv0HNcfWj2lb2OPJ71H+bEq3za28BPCFNEPQwZ+HYgE= -github.com/aws/aws-sdk-go-v2/service/xray v1.22.3 h1:ZhSXLLVeP+uUHQgc0Jq/UpmodQcSi8oV9MxqlJu1LlM= -github.com/aws/aws-sdk-go-v2/service/xray v1.22.3/go.mod h1:G5ck/1GXqf1iLI6btiiSgCXLSihyHfrcauYwHdYNzv4= -github.com/aws/smithy-go v1.17.0 h1:wWJD7LX6PBV6etBUwO0zElG0nWN9rUhp0WdYeHSHAaI= -github.com/aws/smithy-go v1.17.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= +github.com/aws/aws-sdk-go v1.49.4 h1:qiXsqEeLLhdLgUIyfr5ot+N/dGPWALmtM1SetRmbUlY= +github.com/aws/aws-sdk-go v1.49.4/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go-v2 v1.24.0 h1:890+mqQ+hTpNuw0gGP6/4akolQkSToDJgHfQE7AwGuk= +github.com/aws/aws-sdk-go-v2 v1.24.0/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4/go.mod h1:usURWEKSNNAcAZuzRn/9ZYPT8aZQkR7xcCtunK/LkJo= +github.com/aws/aws-sdk-go-v2/config v1.26.1 h1:z6DqMxclFGL3Zfo+4Q0rLnAZ6yVkzCRxhRMsiRQnD1o= +github.com/aws/aws-sdk-go-v2/config v1.26.1/go.mod h1:ZB+CuKHRbb5v5F0oJtGdhFTelmrxd4iWO1lf0rQwSAg= +github.com/aws/aws-sdk-go-v2/credentials v1.16.12 h1:v/WgB8NxprNvr5inKIiVVrXPuuTegM+K8nncFkr1usU= +github.com/aws/aws-sdk-go-v2/credentials v1.16.12/go.mod h1:X21k0FjEJe+/pauud82HYiQbEr9jRKY3kXEIQ4hXeTQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 h1:w98BT5w+ao1/r5sUuiH6JkVzjowOKeOJRHERyy1vh58= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10/go.mod h1:K2WGI7vUvkIv1HoNbfBA1bvIZ+9kL3YVmWxeKuLQsiw= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.7 h1:FnLf60PtjXp8ZOzQfhJVsqF0OtYKQZWQfqOLshh8YXg= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.7/go.mod h1:tDVvl8hyU6E9B8TrnNrZQEVkQlB8hjJwcgpPhgtlnNg= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9 
h1:v+HbZaCGmOwnTTVS86Fleq0vPzOd7tnJGbFhP0stNLs= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9/go.mod h1:Xjqy+Nyj7VDLBtCMkQYOw1QYfAEZCVLrfI0ezve8wd4= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9 h1:N94sVhRACtXyVcjXxrwK1SKFIJrA9pOJ5yu2eSHnmls= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9/go.mod h1:hqamLz7g1/4EJP+GH5NBhcUMLjW+gKLQabgyz6/7WAU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9 h1:ugD6qzjYtB7zM5PN/ZIeaAIyefPaD82G8+SJopgvUpw= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9/go.mod h1:YD0aYBWCrPENpHolhKw2XDlTIWae2GKXT1T4o6N6hiM= +github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.26.5 h1:UdqJHYgBmOYhVA1ixaECd4MTS7EoqWdDeP87YuDauB0= +github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.26.5/go.mod h1:grUsKCGlUQ80qedCiWN8LMlqmm97v81jr/sM1GXBjfg= +github.com/aws/aws-sdk-go-v2/service/account v1.14.5 h1:sAXBYGqq4J/cPrtBrzXbEOSiYToW69qVF7heXDzcGKE= +github.com/aws/aws-sdk-go-v2/service/account v1.14.5/go.mod h1:fvSp4SHBg07Gig7K7mEsO1XUK1jnT+BZRg6oWiOMigY= +github.com/aws/aws-sdk-go-v2/service/acm v1.22.5 h1:GNTWQH4PWazAsb3VXePxGKwzi7OiU8AedMajRJoQEQ8= +github.com/aws/aws-sdk-go-v2/service/acm v1.22.5/go.mod h1:yAwtFXtwrusYjymwgH4ofDG3by5KZvoBt8m87zYzotY= +github.com/aws/aws-sdk-go-v2/service/appconfig v1.26.5 h1:voFN9YKZU3UDxLpp+5vZ8IUXELHNrDx1nV1kH2TQbyg= +github.com/aws/aws-sdk-go-v2/service/appconfig v1.26.5/go.mod h1:HFw+8y3iu+08dKbz+IhHyGcZsLx9cq/NrMmL8rOdD0U= +github.com/aws/aws-sdk-go-v2/service/appfabric v1.5.5 h1:Geh29to5AKWZfFYToHQ4UxNrwvtyLN4ZoNhN+qMldIg= +github.com/aws/aws-sdk-go-v2/service/appfabric v1.5.5/go.mod h1:ET7VpGqYxeyYtA7JmNZQ3+YPZ+lz98P3OvghaTdwaFE= +github.com/aws/aws-sdk-go-v2/service/appflow v1.39.5 h1:zLBG7nAE9TG2WBpHjhMRNOdRDEQ9ylI7Jb865MKv+KE= 
+github.com/aws/aws-sdk-go-v2/service/appflow v1.39.5/go.mod h1:Di+aOZKKn4b6IMnBjMhXb6flOJUMnMJfRuQgTnTpXNU= +github.com/aws/aws-sdk-go-v2/service/apprunner v1.25.5 h1:YpUU6SUj7EYB+l4kGQk2ch5Vz+HK/cn2bfYsJk81+GI= +github.com/aws/aws-sdk-go-v2/service/apprunner v1.25.5/go.mod h1:W1Z87aVGwtb3egdiWLrnMyj5oHzShACix2iebsixuVA= +github.com/aws/aws-sdk-go-v2/service/athena v1.37.3 h1:qNLkDi/rOaauOuh33a4MNZjyfxvwIgC5qsDiHPvjDk0= +github.com/aws/aws-sdk-go-v2/service/athena v1.37.3/go.mod h1:MlpC6swcjh1Il80u6XoeY2BTHIZRZWvoXOfaq3rfh8I= +github.com/aws/aws-sdk-go-v2/service/auditmanager v1.30.5 h1:sV05+Tgq3hsUBe9iLeGyZy31EJ59X5twiz/Mrzcin/o= +github.com/aws/aws-sdk-go-v2/service/auditmanager v1.30.5/go.mod h1:5zHM7qND9NI5fVAHwcNwFeIG4E51Dq8tBPrtkBg9IFk= +github.com/aws/aws-sdk-go-v2/service/bedrock v1.5.5 h1:dnIEmiCC9JWN6k7da5lLGQ7OKwfl/rJus4vFlR053UE= +github.com/aws/aws-sdk-go-v2/service/bedrock v1.5.5/go.mod h1:BV0qlxGaHddZC0s61iQ91+bLER+H0fZBOHuD4oqB8/s= +github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.13.5 h1:vKjaKyzWIr4JlAvSTgPRlX9/YyZRoquOdT7qW+LFM7g= +github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.13.5/go.mod h1:iAL3lKFH5VzYfAGzXNy9tkM3Z36BSuIXZju5qK5ckXg= +github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.12.5 h1:Th+kzme/nRjTdxBhvyT0VXRjwXcqLPlLdspkaGRynqs= +github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.12.5/go.mod h1:9ETC6GsMWygbmUdP8IkjSgXNqy8pwFEgH/eehoDwYMU= +github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.8.5 h1:HH9fmVqF71UES7ES8+vAnJ7/3igo5rJp1BtgScHAdHs= +github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.8.5/go.mod h1:6nxVpS0JBdSwXDm+vo+Hwz/CJn03vu6HexNB7bQSv3Y= +github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.15.5 h1:9aS9PZ/cnTVjWDIOVqgxKd+cRxP9W1MYrQhXwh/vBec= +github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.15.5/go.mod h1:21V6X5ZV37Oel5VQZRZtxMj6jeqQr6sMbhuWu9oTaH0= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.30.0 h1:CMZz/TJgt+GMKRxjuedxhMFs45GPhyst/a/7Q3DuAg4= 
+github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.30.0/go.mod h1:4Oeb7n2r/ApBIHphQkprve380p/RpPWBotumd44EDGg= +github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.10.5 h1:52hjOAJdIm0P2MWM14J7aLKtcT8SItEtdluW+5LbWSo= +github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.10.5/go.mod h1:8GW1bxNLHWPRwtpJKNn8z0h2N6nKgoAsN4CjeAMIrLA= +github.com/aws/aws-sdk-go-v2/service/codedeploy v1.22.1 h1:cyRoT4yeLGEQk8ad4Se82INAA8Xcu6xr1grQ684GYnQ= +github.com/aws/aws-sdk-go-v2/service/codedeploy v1.22.1/go.mod h1:RiusqJl55/p7S8LNMh2J3ZsDHDqxRiPdsfIaZRKeEUo= +github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.18.5 h1:Jw0fM7521qn4edNQKiq3KUwdxY1c3iPnnOBgIzUoXz4= +github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.18.5/go.mod h1:RDeY2hgSGG8yoZBaBH8I9h89Wz7BVhnVRtaNaKQRELM= +github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.21.5 h1:AlmfzS3CBH1OMXjFU8sy7JMa1xIPC1n0Ke4zvOaEHRo= +github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.21.5/go.mod h1:vTqieaH9W3irEs13g5QuwNCOhmJUqpkIswlh4Twhq/Y= +github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.20.5 h1:lz+cMe5wjevIaayclzOnz5kXLR++VjHiZnVnieOpd+c= +github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.20.5/go.mod h1:oapZWAj2ivHtbARQ7WCy7Omszz1SMX4TsuvzOzf/nbQ= +github.com/aws/aws-sdk-go-v2/service/comprehend v1.29.5 h1:nGBN3HiM7ged9yP2kCWI/8uAXBHg58bDIMLRXeHZam8= +github.com/aws/aws-sdk-go-v2/service/comprehend v1.29.5/go.mod h1:j22SPKm/C8/bzS5LdxF9DKQNZH2xDt4xBc88pcn3+w4= +github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.31.5 h1:64f/3D7gFxW/wAO/v48HD5pJs1eEE4gRwA9rAFEdEu4= +github.com/aws/aws-sdk-go-v2/service/computeoptimizer v1.31.5/go.mod h1:BlYY4hg0e2n3xrU/En0syXSD5KhHeDNna/aETUe0I1I= +github.com/aws/aws-sdk-go-v2/service/connectcases v1.12.5 h1:66XGF7vdSc6XpG7xOg2zt1fW1FzY1LB2BQardkGxK0M= +github.com/aws/aws-sdk-go-v2/service/connectcases v1.12.5/go.mod h1:R9o2YFsOY6PTlfFPacDGKL5cgesr3+ZTXA5i3PhOai4= +github.com/aws/aws-sdk-go-v2/service/controltower v1.10.6 
h1:Sb6qOCo2oD9iGJ+0gyCK/bQDNqfk9vH9rTwXsCvG8Ik= +github.com/aws/aws-sdk-go-v2/service/controltower v1.10.6/go.mod h1:HIRn9vSg38bhAI8BlxIWXl/i8qPruJzon9kPOeD31Ng= +github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.34.5 h1:a//AdeswzibpC4fkkB1X4Ql/4iWZKGyYV0lWNTRDp1w= +github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.34.5/go.mod h1:Dst4mNfdyggL9PHmkYdSiVgJvwhfboruXtzQZpy46Xs= +github.com/aws/aws-sdk-go-v2/service/directoryservice v1.22.5 h1:i/7aXIrjTdVZtch90MSQ3EC03dh5XgTmJtbAqFtzysk= +github.com/aws/aws-sdk-go-v2/service/directoryservice v1.22.5/go.mod h1:KTFSRANgKK34D1LNNtOkPLWVgjhbx172XAQ1cDkP+08= +github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.6.5 h1:ikZu83oYYnSdtc73OP1HCBXuSxQ9AXDEebHhgnTpGDA= +github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.6.5/go.mod h1:XEY63kzpXT3wMrE6yBqWCY+K1bq5Fixq32eCZYFhwpA= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.6 h1:kSdpnPOZL9NG5QHoKL5rTsdY+J+77hr+vqVMsPeyNe0= +github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.6/go.mod h1:o7TD9sjdgrl8l/g2a2IkYjuhxjPy9DMP2sWo7piaRBQ= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.141.0 h1:cP43vFYAQyREOp972C+6d4+dzpxo3HolNvWfeBvr2Yg= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.141.0/go.mod h1:qjhtI9zjpUHRc6khtrIM9fb48+ii6+UikL3/b+MKYn0= +github.com/aws/aws-sdk-go-v2/service/ecr v1.24.5 h1:wLPDAUFT50NEXGXpywRU3AA74pg35RJjWol/68ruvQQ= +github.com/aws/aws-sdk-go-v2/service/ecr v1.24.5/go.mod h1:AOHmGMoPtSY9Zm2zBuwUJQBisIvYAZeA1n7b6f4e880= +github.com/aws/aws-sdk-go-v2/service/eks v1.35.5 h1:LEYyWSnfdSSysPr5JWUkNwOD0MvXKfE/BX6Frg/lr1A= +github.com/aws/aws-sdk-go-v2/service/eks v1.35.5/go.mod h1:L1uv3UgQlAkdM9v0gpec7nnfUiQkCnGMjBE7MJArfWQ= +github.com/aws/aws-sdk-go-v2/service/emr v1.35.5 h1:dZtEDpqYVg3i5oT8lSXxEsg6dInewHA3qNuyzHTvWck= +github.com/aws/aws-sdk-go-v2/service/emr v1.35.5/go.mod h1:Drh6y2qLaw/wnDKTIcdqM2m358MIRXsZ2Bj2tjhVLq0= +github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.6 h1:O2ppygCppB40GS7lDJUX4dGEgEdsKkX62oIAGgre/rY= 
+github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.6/go.mod h1:G2r5cqojvwkdJJx6NDxszEfHC8f02TF15dE/3bg8P9A= +github.com/aws/aws-sdk-go-v2/service/evidently v1.16.5 h1:qMMMld3RbqxSZ5KEokAu+w4MGV9YlSvisJbk4iMO4m0= +github.com/aws/aws-sdk-go-v2/service/evidently v1.16.5/go.mod h1:ydI4dfZIWil2hOsneE1QWDOxY/CdC37oT96S4JOrD24= +github.com/aws/aws-sdk-go-v2/service/finspace v1.20.0 h1:n3TWZAn4gV2/GiJMnuNuSEkgyXHkKPEkenU5ZmmFS1o= +github.com/aws/aws-sdk-go-v2/service/finspace v1.20.0/go.mod h1:FyO9e8uUMRvKJJnJRa7/1gUQTbhxuYVJXFELmetDs7o= +github.com/aws/aws-sdk-go-v2/service/fis v1.21.5 h1:B2W4XsrlmALX+kRWYiOQ+h74rHJ/xKDRqV7EAtvJZ/g= +github.com/aws/aws-sdk-go-v2/service/fis v1.21.5/go.mod h1:9UDhIS/srJd1FNglu7iVzUsPyJyd9inpe1ctAqPHKYg= +github.com/aws/aws-sdk-go-v2/service/glacier v1.19.5 h1:uPp9xWrEh9ui0WN1G3G7Rhgr6TAo23WwSrU06O9Cw2Q= +github.com/aws/aws-sdk-go-v2/service/glacier v1.19.5/go.mod h1:U/zNi1isGbxK7fobrqBYLUS+7BNqMtxu49bR27ZcPYQ= +github.com/aws/aws-sdk-go-v2/service/healthlake v1.20.5 h1:lm7KEWrkI54kso0o3qwODbJDTpEvdZyj/NoKOIheKOg= +github.com/aws/aws-sdk-go-v2/service/healthlake v1.20.5/go.mod h1:5IxzIDau0tsh8NRR6wcRp8u1Xn9QY9CcD9e34lpFqEQ= +github.com/aws/aws-sdk-go-v2/service/iam v1.28.5 h1:Ts2eDDuMLrrmd0ARlg5zSoBQUvhdthgiNnPdiykTJs0= +github.com/aws/aws-sdk-go-v2/service/iam v1.28.5/go.mod h1:kKI0gdVsf+Ev9knh/3lBJbchtX5LLNH25lAzx3KDj3Q= +github.com/aws/aws-sdk-go-v2/service/identitystore v1.21.5 h1:x93yL/0ey4Y/HEBSsqcLNQDDeIVRLOdziLMg3+YM/F8= +github.com/aws/aws-sdk-go-v2/service/identitystore v1.21.5/go.mod h1:vs4IYQdGHOLq6DsPfSuoADmRzr/AeWIk8m50XBnwN/o= +github.com/aws/aws-sdk-go-v2/service/inspector2 v1.20.5 h1:PKwE3fh67K7Kig3LlbuipQOrNSraQuEpFl09VOpaNvc= +github.com/aws/aws-sdk-go-v2/service/inspector2 v1.20.5/go.mod h1:hIgLcOPNanV8IteYZUx1YyLUJf//t0dI1F2+ecjVvlo= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding 
v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.9 h1:/90OR2XbSYfXucBMJ4U14wrjlfleq/0SB6dZDPncgmo= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.2.9/go.mod h1:dN/Of9/fNZet7UrQQ6kTDo/VSwKPIq94vjlU16bRARc= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.8.10 h1:h8uweImUHGgyNKrxIUwpPs6XiH0a6DJ17hSJvFLgPAo= +github.com/aws/aws-sdk-go-v2/service/internal/endpoint-discovery v1.8.10/go.mod h1:LZKVtMBiZfdvUWgwg61Qo6kyAmE5rn9Dw36AqnycvG8= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 h1:Nf2sHxjMJR8CSImIVCONRi4g0Su3J+TSTbS7G0pUeMU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9/go.mod h1:idky4TER38YIjr2cADF1/ugFMKvZV7p//pVeV5LZbF0= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9 h1:iEAeF6YC3l4FzlJPP9H3Ko1TXpdjdqWffxXjp8SY6uk= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9/go.mod h1:kjsXoK23q9Z/tLBrckZLLyvjhZoS+AGrzqzUfEClvMM= +github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.10.5 h1:05ZNe2xprVADbOPDOVpBiwHAkits4ftok77kqYR58Ro= +github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.10.5/go.mod h1:EGOD8sGU5W6NO+TgfZeEPv3WdEB+NyCtJ5KET2kJWZI= +github.com/aws/aws-sdk-go-v2/service/ivschat v1.10.5 h1:c6B43g0FFZ51zIUYgHSnPv0BDP4e6DTVUw6gi1oy+wg= +github.com/aws/aws-sdk-go-v2/service/ivschat v1.10.5/go.mod h1:i+IKSFp4gZZj54Ffu0skGoV/3ilRQdLH9eIZij3pTEI= +github.com/aws/aws-sdk-go-v2/service/kafka v1.28.5 h1:yCkyZDGahaCaAkdpVx8Te05t6eW2FarBLunVC8S23nU= +github.com/aws/aws-sdk-go-v2/service/kafka v1.28.5/go.mod h1:/KmX+vXMPJGAB56reo95tnsXa6QPNx6qli4L1AmYb7E= +github.com/aws/aws-sdk-go-v2/service/kendra v1.47.5 h1:cbNxSjOL87ojmhzOmAFEZ2C3V134s+Ry0gIAkE3g1HI= +github.com/aws/aws-sdk-go-v2/service/kendra v1.47.5/go.mod h1:ZJKXlOfOrm/3tB501yY0yo9NOCWSAVsWRLYiS61GO8M= +github.com/aws/aws-sdk-go-v2/service/keyspaces v1.7.5 h1:lvhWIY+MyMYoSPBLfZsgyLkpkqAyNMX/mAkGXbkzslk= 
+github.com/aws/aws-sdk-go-v2/service/keyspaces v1.7.5/go.mod h1:YVdR8FtIDbHvsDkXuBa1ahRC+OhegEZY76h2k3ecLkg= +github.com/aws/aws-sdk-go-v2/service/lambda v1.49.5 h1:ZHVbzOnoj5nXxUug8iWzqg2Tmp6Jc4CE5tPfoE96qrs= +github.com/aws/aws-sdk-go-v2/service/lambda v1.49.5/go.mod h1:0V5z1X/8NA9eQ5cZSz5ZaHU8xA/hId2ZAlsHeO7Jrdk= +github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.38.5 h1:3brhc6+qCRptJQB49YhOlLDFJM324GrXcpMK6knozdE= +github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.38.5/go.mod h1:f+42yqPylOVSwssJ54Bk1TJDvLvGgy1SGTe/vwagfgo= +github.com/aws/aws-sdk-go-v2/service/lightsail v1.32.5 h1:0KVnA62WGcVdeJKH+DTUkxNms2OsIky+AmB2iX93eAs= +github.com/aws/aws-sdk-go-v2/service/lightsail v1.32.5/go.mod h1:wI7palPB84YaqCYglfNiyAlDcXTFbcJ9rDHMu15cFto= +github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.25.5 h1:ppIqmTGLQo5emXMrMN/mQKNK5QdaYj4Wjmfpp4uMPz0= +github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.25.5/go.mod h1:ScibKBixJ/ywFZFjkmnKZmqjHwwnqqtLRBDV+XyzLoQ= +github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.24.5 h1:H0mJ0k7VH3Wctsxv3K42A7BxOvPDtJHavvoXlwc0+g0= +github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.24.5/go.mod h1:MFGDlrVZ4xLoX2BXiBa0fpEyMzEiFFWviK51g6V8Axs= +github.com/aws/aws-sdk-go-v2/service/medialive v1.43.3 h1:/Ub7sD+eD7K6FWQeuALyVApqyec5Ngk893X3VrVPN6c= +github.com/aws/aws-sdk-go-v2/service/medialive v1.43.3/go.mod h1:fH6Wz0q9JXupxmSgCFPwxymnpiX6PitFx2f/AqjuayM= +github.com/aws/aws-sdk-go-v2/service/mediapackage v1.28.5 h1:z+b1lClMC3rSxlUQqRbpGh/uMmUHWC96uQ+AzzZpens= +github.com/aws/aws-sdk-go-v2/service/mediapackage v1.28.5/go.mod h1:wGaElJ8kmGJ08nnirzZ/6iWKqBPErlHqtpkbx9go82Q= +github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.7.5 h1:tkFfqFu8yx0AmRZAlwcF6hdDf7E7J+0P4tRAtfVB2bA= +github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.7.5/go.mod h1:pPsl4jKNPkhp2unuSQ3upeQ+9U8onSOPA2B++m5bD8o= +github.com/aws/aws-sdk-go-v2/service/oam v1.7.5 h1:Z5qjasrNlticGJVwZahvPiv7cnGeuEFGQ5AdCeTgf/0= +github.com/aws/aws-sdk-go-v2/service/oam 
v1.7.5/go.mod h1:qwJgNmAMUGFkLgAgTtkZZpGf9Qe1L0PwMD4oXMeS9Ic= +github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.9.5 h1:V+zBQiUAATdwx3rLbc4Em+G0IeqPtY1281lHMrTvIK4= +github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.9.5/go.mod h1:Hky91JAG7y6hJrIoZ6IyJlB99+AFOPUIfqVQcZ+fbhY= +github.com/aws/aws-sdk-go-v2/service/osis v1.6.5 h1:u0FL7wY1ni4WQkpfUiBslPmwKOltziQkGg5njTpPH6M= +github.com/aws/aws-sdk-go-v2/service/osis v1.6.5/go.mod h1:wRTpbH8h5d4SJmdsy9LNEuZNHrNtUCZMl+U1slAW4Ng= +github.com/aws/aws-sdk-go-v2/service/pipes v1.9.6 h1:cDjJ1OsUDDHP0DERFe+kon0awE0vMt+6xjd9zuOaOv8= +github.com/aws/aws-sdk-go-v2/service/pipes v1.9.6/go.mod h1:N3pAD/7GiKZAOBFFsF9BqWdSg33HM8ibXoAyPQXgcNI= +github.com/aws/aws-sdk-go-v2/service/polly v1.36.5 h1:/BHypWAWPEuwfnlb4hJz5R1uedDGNtorZgEHYtW/wI4= +github.com/aws/aws-sdk-go-v2/service/polly v1.36.5/go.mod h1:mmQzyk89+rKEfieMV8gHoFoVmrPiyKjqORj2Uk5+O04= +github.com/aws/aws-sdk-go-v2/service/pricing v1.24.5 h1:yJniPHxzGy0jtJNkXYTqI8ps587kl1Jf8Luz5K8Jxjs= +github.com/aws/aws-sdk-go-v2/service/pricing v1.24.5/go.mod h1:Er8P68q9ayXFNzdTLKH9vGQ5Pq6fzqv0YYjslHxh8GE= +github.com/aws/aws-sdk-go-v2/service/qldb v1.19.5 h1:dzxL7EqY37jp4AGBbMXyZT+koN8WMCEO0XCPuLp17pw= +github.com/aws/aws-sdk-go-v2/service/qldb v1.19.5/go.mod h1:tN5rVxOznGnV6y5gXixoL83vMOAuPTFAnqafo813M8A= +github.com/aws/aws-sdk-go-v2/service/rbin v1.14.3 h1:5rT2pGAFgU2c/nkAZM2iDVVkLceQ04XFgkeWxKM04/4= +github.com/aws/aws-sdk-go-v2/service/rbin v1.14.3/go.mod h1:yX/8MJOGKdhrLvzOHppNzJvBQh5OKocDq4sP3CtXxgE= +github.com/aws/aws-sdk-go-v2/service/rds v1.64.6 h1:5aUu86tGOprdKtoIClCYPC6i4xalRDztBOlXgJnQFHk= +github.com/aws/aws-sdk-go-v2/service/rds v1.64.6/go.mod h1:MYzRMSdY70kcS8AFg0aHmk/xj6VAe0UfaCCoLrBWPow= +github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.23.5 h1:jGGtFvVJ7RwXtAYOxLoUzWw5WjvsO1NYWuMawL64gZU= +github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.23.5/go.mod h1:nJQaSBV7r9td6WMmDDGKtlwE8D9BIDEDIpANfN+gMPE= 
+github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.8.5 h1:7+BV1yNEchDbrgg/hdPVAi3jomqkoI5lqcQcTWTunGA= +github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.8.5/go.mod h1:/zyGxTiN9z6xm3bEF4nJJLCqnbfcua+oLGrtr3xNiuE= +github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.19.5 h1:WDwFoNiIKvLkQJPSYs/KGefGknjn45xKQVTW96Lpcx0= +github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.19.5/go.mod h1:kHgibL7mHteV68QqxEWk/+GfSioAUZGBlz4e3Vs2r60= +github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.19.5 h1:vINTeQlqUbYkyKichayWejWqsMNya35Mj7XBcUZnwVI= +github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.19.5/go.mod h1:Nngchp1Q7LNBS8J10r4P0npfroNRaCVz6wWNfBz7j4E= +github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.6.6 h1:K//BccrDBRMSQCa4UkVVYCp2y4z77arQiT2TYl88wY0= +github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.6.6/go.mod h1:e2+mEoq1rHtFpX8p6WcgiFgnDz0zG6y1BY/g8us9g2I= +github.com/aws/aws-sdk-go-v2/service/route53domains v1.20.5 h1:WDr8iQXuDzL6ERqRvpdIy1ZdOjg6lXlEHSo8wOJiOyI= +github.com/aws/aws-sdk-go-v2/service/route53domains v1.20.5/go.mod h1:7fnaaVoKfZaWJ8RuNYTYV3SkqD6BkFYlRuFDEkHajpc= +github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5 h1:Keso8lIOS+IzI2MkPZyK6G0LYcK3My2LQ+T5bxghEAY= +github.com/aws/aws-sdk-go-v2/service/s3 v1.47.5/go.mod h1:vADO6Jn+Rq4nDtfwNjhgR84qkZwiC6FqCaXdw/kYwjA= +github.com/aws/aws-sdk-go-v2/service/s3control v1.41.5 h1:Rv7K8i7cvpy0XWt06r4vDKyMswLld6mnOyfs8b38534= +github.com/aws/aws-sdk-go-v2/service/s3control v1.41.5/go.mod h1:sjVex3IIN70lry8Diga0vdi1DoHFwyFXY68ols4I8VI= +github.com/aws/aws-sdk-go-v2/service/scheduler v1.6.5 h1:RpON5qyMUJKOGdQt0K7RUmV0zTUVSSGWtjvh/0CAqd8= +github.com/aws/aws-sdk-go-v2/service/scheduler v1.6.5/go.mod h1:CXWnhzgqEhXAYwTVg4vBZQcP+yb4KxXOkogYih2tFm8= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.44.0 h1:ft7wTBdLlWGoZpF22CHmDywWj//MTUjyJoevEXBRHZg= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.44.0/go.mod h1:f//4sy7Yk66HjLWyQcFb6Vtkp/HEforV7G99czcsq54= 
+github.com/aws/aws-sdk-go-v2/service/securitylake v1.10.5 h1:gZ1yiSTBmJuQ1LCDIXlFQ+1XvC91QzAwRIhJ1L4ROp0= +github.com/aws/aws-sdk-go-v2/service/securitylake v1.10.5/go.mod h1:eTBmkdUxVPP+Dy47TDGw9ZV6i7Y2oxYMFrxSkEPNO3w= +github.com/aws/aws-sdk-go-v2/service/servicequotas v1.19.5 h1:IN/aY5wGoRMfZJuuZrp07bvdJt9M7Nh7+alOjae7mM4= +github.com/aws/aws-sdk-go-v2/service/servicequotas v1.19.5/go.mod h1:mSa1Q/Q1/nAVj7nShrepbcRz1vXQFWv5sb9CFL1/4OM= +github.com/aws/aws-sdk-go-v2/service/sesv2 v1.24.5 h1:40JojNesfzskcmQvfj6UUxH1nzN4UtXWfjlSFfFqsns= +github.com/aws/aws-sdk-go-v2/service/sesv2 v1.24.5/go.mod h1:ecfOtw2ELIDKjgOxV7Zbg++MwZN0kFDqK8tLxF7uSys= +github.com/aws/aws-sdk-go-v2/service/signer v1.19.6 h1:Y4Rikb/krOWTfdy6dzQ2/WbBGRTTPcM6qAB+Mt0QKVo= +github.com/aws/aws-sdk-go-v2/service/signer v1.19.6/go.mod h1:Y3u+41K5TVVkKhSlzZ+mtUI9z1k13TxpLtbJNHhV3fA= +github.com/aws/aws-sdk-go-v2/service/sns v1.26.5 h1:umyC9zH/A1w8AXrrG7iMxT4Rfgj80FjfvLannWt5vuE= +github.com/aws/aws-sdk-go-v2/service/sns v1.26.5/go.mod h1:IrcbquqMupzndZ20BXxDxjM7XenTRhbwBOetk4+Z5oc= +github.com/aws/aws-sdk-go-v2/service/sqs v1.29.5 h1:cJb4I498c1mrOVrRqYTcnLD65AFqUuseHfzHdNZHL9U= +github.com/aws/aws-sdk-go-v2/service/sqs v1.29.5/go.mod h1:mCUv04gd/7g+/HNzDB4X6dzJuygji0ckvB3Lg/TdG5Y= +github.com/aws/aws-sdk-go-v2/service/ssm v1.44.5 h1:5SI5O2tMp/7E/FqhYnaKdxbWjlCi2yujjNI/UO725iU= +github.com/aws/aws-sdk-go-v2/service/ssm v1.44.5/go.mod h1:uXndCJoDO9gpuK24rNWVCnrGNUydKFEAYAZ7UU9S0rQ= +github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.20.5 h1:qIzGNd+8lT3hXdq/TJ7sxGWq9xI1uKfeorwP4tYuJR0= +github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.20.5/go.mod h1:Jo4uHzInZp+heTq54nz0c71D1a2som4mlvK/jDtZSKw= +github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.27.5 h1:WOVvRHb2gJaaQNXkjxT5DSHazMwlycAqi4SMHnX1kyI= +github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.27.5/go.mod h1:n+AjlyOudRAgZMU/1XowAXzP5bVYizB7mkjXSsXh4wc= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.5 
h1:ldSFWz9tEHAwHNmjx2Cvy1MjP5/L9kNoR0skc6wyOOM= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.5/go.mod h1:CaFfXLYL376jgbP7VKC96uFcU8Rlavak0UlAwk1Dlhc= +github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.23.5 h1:WaH4tywTDnktvZFmNEMlgxJ89CjDxpedqI/AtJ0wJBs= +github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.23.5/go.mod h1:8o8oOg3mQJcmwWdjfVSILMWrSJyXiohzTFuqYMrmy6Q= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5 h1:2k9KmFawS63euAkY4/ixVNsYYwrwnd5fIvgEKkfZFNM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5/go.mod h1:W+nd4wWDVkSUIox9bacmkBP5NMFQeTJ/xqNabpzSR38= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.5 h1:5UYvv8JUvllZsRnfrcMQ+hJ9jNICmcgKPAO1CER25Wg= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.5/go.mod h1:XX5gh4CB7wAs4KhcF46G6C8a2i7eupU19dcAAE+EydU= +github.com/aws/aws-sdk-go-v2/service/swf v1.20.5 h1:9CU3kwRGpUReKubOsmxgG9LfaVpZ1PW/ON+5ZTKu5Gs= +github.com/aws/aws-sdk-go-v2/service/swf v1.20.5/go.mod h1:i01QTdCHqrntRqtNeYmxUSDCcmXERzFCePIcHDjASHE= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.23.6 h1:+7xZRneTlcraXL4+oN2kUlQX9ULh4aIxmcpUoR/faGA= +github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.23.6/go.mod h1:igyhq0ZP1mXLKnSAGrGneVjs2aELNYQkskoF/WxR3+w= +github.com/aws/aws-sdk-go-v2/service/transcribe v1.34.5 h1:/UVYwh9hQDvXsCCJcafCKHgykfOa/EpsOfJPgiSYSSU= +github.com/aws/aws-sdk-go-v2/service/transcribe v1.34.5/go.mod h1:1lOM6vjI+sDly/6LvdON+ksgGq/IZUYLczKG4HCJaZ0= +github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.8.3 h1:1L+/ZK8nGuc1HdtQpXL++zMhdMz2tYycweMeUmWazXY= +github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.8.3/go.mod h1:QJoz7ojCJ/cT0q9sV+K9ZZBETBVoSpJXyRzvEt4BuSg= +github.com/aws/aws-sdk-go-v2/service/vpclattice v1.5.5 h1:8AV6s1CjF1Kg4wI4Cru0vFRiQALPe3T/THLkPGCbQo0= +github.com/aws/aws-sdk-go-v2/service/vpclattice v1.5.5/go.mod h1:Avxrq4VqhpuKgGdZifhrJP5a9DsDt7cESkdhaZHnYp0= +github.com/aws/aws-sdk-go-v2/service/workspaces v1.35.6 h1:RrpjQ5xJN/AW0PCO7EGhhVsKq7BeNqkx5+h6p3QOeTU= 
+github.com/aws/aws-sdk-go-v2/service/workspaces v1.35.6/go.mod h1:vkYsJdF9sZl/o1eoK8tSSjzAT+R87QjswOGSTZfyO0Y= +github.com/aws/aws-sdk-go-v2/service/xray v1.23.5 h1:uCqKSGx5Esj9ZW6/zZ7tslkM65aH+qjHO3yboiRqcLo= +github.com/aws/aws-sdk-go-v2/service/xray v1.23.5/go.mod h1:VmWKTNu6V1qRG+skNKkYt7VOFohYdtOp7B2OSvpBZac= +github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= +github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= github.com/beevik/etree v1.2.0 h1:l7WETslUG/T+xOPs47dtd6jov2Ii/8/OjCldk5fYfQw= github.com/beevik/etree v1.2.0/go.mod h1:aiPf89g/1k3AShMVAzriilpcE4R/Vuor90y83zVZWFc= github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= @@ -250,7 +267,7 @@ github.com/gertd/go-pluralize v0.2.1 h1:M3uASbVjMnTsPb0PNqg+E/24Vwigyo/tvyMTtAlL github.com/gertd/go-pluralize v0.2.1/go.mod h1:rbYaKDbsXxmRfr8uygAEKhOWsjyrrqrkHVpZvoOp8zk= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= -github.com/go-git/go-git/v5 v5.9.0 h1:cD9SFA7sHVRdJ7AYck1ZaAa/yeuBvGPxwXDL8cxrObY= +github.com/go-git/go-git/v5 v5.10.1 h1:tu8/D8i+TWxgKpzQ3Vc43e+kkhXqtsZCKI/egajKnxk= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -273,10 +290,10 @@ github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/hashicorp/aws-cloudformation-resource-schema-sdk-go v0.21.0 h1:IUypt/TbXiJBkBbE3926CgnjD8IltAitdn7Yive61DY= github.com/hashicorp/aws-cloudformation-resource-schema-sdk-go v0.21.0/go.mod h1:cdTE6F2pCKQobug+RqRaQp7Kz9hIEqiSvpPmb6E5G1w= 
-github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.42 h1:XcumID+s2NJGcHsN/0DFTLQtrWwRjLMvk/8P2uoUlLc= -github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.42/go.mod h1:yh+UKXzrK7103tYrCzMC5nd1Qdga+1PRH8JMN8ulfzk= -github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.43 h1:EAj06tIkFL3lD4ro0JIl0VpFShHu5USmn5JvrPwxToo= -github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.43/go.mod h1:lTTufG5gVtNqAarfzkVOCokVLrKBDy/eKEHLIgcqdfQ= +github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.45 h1:esKaa1l2oJiARVIa20DPxgID9V7FyFfert7X1FWg1HU= +github.com/hashicorp/aws-sdk-go-base/v2 v2.0.0-beta.45/go.mod h1:roO9Btzl+fvOFhvDN7CuPf6n60K6Yh0ykzwxhwbMK90= +github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.46 h1:fqKzv4gP8AQe89FMDU2HgmzFbjYZ9dlKMnyXdnEFIig= +github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2 v2.0.0-beta.46/go.mod h1:j91OF53uFDky+CuBApWHmJ3PqLAbwEsWXckzFwc9eeY= github.com/hashicorp/awspolicyequivalence v1.6.0 h1:7aadmkalbc5ewStC6g3rljx1iNvP4QyAhg2KsHx8bU8= github.com/hashicorp/awspolicyequivalence v1.6.0/go.mod h1:9IOaIHx+a7C0NfUNk1A93M7kHd5rJ19aoUx37LZGC14= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -294,37 +311,37 @@ github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVH github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-plugin v1.5.2 h1:aWv8eimFqWlsEiMrYZdPYl+FdHaBJSN4AWwGWfT1G2Y= -github.com/hashicorp/go-plugin v1.5.2/go.mod h1:w1sAEES3g3PuV/RzUrgow20W2uErMly84hhD3um1WL4= +github.com/hashicorp/go-plugin v1.6.0 h1:wgd4KxHJTVGGqWBq4QPB1i5BZNEx9BR8+OFmHDmTk8A= +github.com/hashicorp/go-plugin v1.6.0/go.mod h1:lBS5MtSSBZk0SHc66KACcjjlU6WzEVP/8pwz68aMkCI= github.com/hashicorp/go-uuid v1.0.0/go.mod 
h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.6.1 h1:IGxShH7AVhPaSuSJpKtVi/EFORNjO+OYVJJrAtGG2mY= -github.com/hashicorp/hc-install v0.6.1/go.mod h1:0fW3jpg+wraYSnFDJ6Rlie3RvLf1bIqVIkzoon4KoVE= +github.com/hashicorp/hc-install v0.6.2 h1:V1k+Vraqz4olgZ9UzKiAcbman9i9scg9GgSt/U3mw/M= +github.com/hashicorp/hc-install v0.6.2/go.mod h1:2JBpd+NCFKiHiu/yYCGaPyPHhZLxXTpz8oreHa/a3Ps= github.com/hashicorp/hcl/v2 v2.19.1 h1://i05Jqznmb2EXqa39Nsvyan2o5XyMowW5fnCKW5RPI= github.com/hashicorp/hcl/v2 v2.19.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= github.com/hashicorp/logutils v1.0.0 h1:dLEQVugN8vlakKOUE3ihGLTZJRB4j+M2cdTm/ORI65Y= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/terraform-exec v0.19.0 h1:FpqZ6n50Tk95mItTSS9BjeOVUb4eg81SpgVtZNNtFSM= github.com/hashicorp/terraform-exec v0.19.0/go.mod h1:tbxUpe3JKruE9Cuf65mycSIT8KiNPZ0FkuTE3H4urQg= -github.com/hashicorp/terraform-json v0.17.1 h1:eMfvh/uWggKmY7Pmb3T85u86E2EQg6EQHgyRwf3RkyA= -github.com/hashicorp/terraform-json v0.17.1/go.mod h1:Huy6zt6euxaY9knPAFKjUITn8QxUFIe9VuSzb4zn/0o= +github.com/hashicorp/terraform-json v0.18.0 h1:pCjgJEqqDESv4y0Tzdqfxr/edOIGkjs8keY42xfNBwU= +github.com/hashicorp/terraform-json v0.18.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= github.com/hashicorp/terraform-plugin-framework v1.4.2 h1:P7a7VP1GZbjc4rv921Xy5OckzhoiO3ig6SGxwelD2sI= github.com/hashicorp/terraform-plugin-framework v1.4.2/go.mod h1:GWl3InPFZi2wVQmdVnINPKys09s9mLmTZr95/ngLnbY= github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1 
h1:gm5b1kHgFFhaKFhm4h2TgvMUlNzFAtUqlcOWnWPm+9E= github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1/go.mod h1:MsjL1sQ9L7wGwzJ5RjcI6FzEMdyoBnw+XK8ZnOvQOLY= github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 h1:HOjBuMbOEzl7snOdOoUfE2Jgeto6JOjLVQ39Ls2nksc= github.com/hashicorp/terraform-plugin-framework-validators v0.12.0/go.mod h1:jfHGE/gzjxYz6XoUwi/aYiiKrJDeutQNUtGQXkaHklg= -github.com/hashicorp/terraform-plugin-go v0.19.1 h1:lf/jTGTeELcz5IIbn/94mJdmnTjRYm6S6ct/JqCSr50= -github.com/hashicorp/terraform-plugin-go v0.19.1/go.mod h1:5NMIS+DXkfacX6o5HCpswda5yjkSYfKzn1Nfl9l+qRs= -github.com/hashicorp/terraform-plugin-mux v0.12.0 h1:TJlmeslQ11WlQtIFAfth0vXx+gSNgvMEng2Rn9z3WZY= -github.com/hashicorp/terraform-plugin-mux v0.12.0/go.mod h1:8MR0AgmV+Q03DIjyrAKxXyYlq2EUnYBQP8gxAAA0zeM= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0 h1:X7vB6vn5tON2b49ILa4W7mFAsndeqJ7bZFOGbVO+0Cc= -github.com/hashicorp/terraform-plugin-sdk/v2 v2.30.0/go.mod h1:ydFcxbdj6klCqYEPkPvdvFKiNGKZLUs+896ODUXCyao= -github.com/hashicorp/terraform-plugin-testing v1.5.1 h1:T4aQh9JAhmWo4+t1A7x+rnxAJHCDIYW9kXyo4sVO92c= -github.com/hashicorp/terraform-plugin-testing v1.5.1/go.mod h1:dg8clO6K59rZ8w9EshBmDp1CxTIPu3yA4iaDpX1h5u0= +github.com/hashicorp/terraform-plugin-go v0.20.0 h1:oqvoUlL+2EUbKNsJbIt3zqqZ7wi6lzn4ufkn/UA51xQ= +github.com/hashicorp/terraform-plugin-go v0.20.0/go.mod h1:Rr8LBdMlY53a3Z/HpP+ZU3/xCDqtKNCkeI9qOyT10QE= +github.com/hashicorp/terraform-plugin-mux v0.13.0 h1:79U401/3nd8CWwDGtTHc8F3miSCAS9XGtVarxSTDgwA= +github.com/hashicorp/terraform-plugin-mux v0.13.0/go.mod h1:Ndv0FtwDG2ogzH59y64f2NYimFJ6I0smRgFUKfm6dyQ= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0 h1:Bl3e2ei2j/Z3Hc2HIS15Gal2KMKyLAZ2om1HCEvK6es= +github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0/go.mod h1:i2C41tszDjiWfziPQDL5R/f3Zp0gahXe5No/MIO9rCE= +github.com/hashicorp/terraform-plugin-testing v1.6.0 h1:Wsnfh+7XSVRfwcr2jZYHsnLOnZl7UeaOBvsx6dl/608= 
+github.com/hashicorp/terraform-plugin-testing v1.6.0/go.mod h1:cJGG0/8j9XhHaJZRC+0sXFI4uzqQZ9Az4vh6C4GJpFE= github.com/hashicorp/terraform-registry-address v0.2.3 h1:2TAiKJ1A3MAkZlH1YI/aTVcLZRu7JseiXNRHbOAyoTI= github.com/hashicorp/terraform-registry-address v0.2.3/go.mod h1:lFHA76T8jfQteVfT7caREqguFrW3c4MFSPhZB7HHgUM= github.com/hashicorp/terraform-svchost v0.1.1 h1:EZZimZ1GxdqFRinZ1tpJwVxxt49xc/S52uzrw4x0jKQ= @@ -397,7 +414,7 @@ github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/skeema/knownhosts v1.2.0 h1:h9r9cf0+u7wSE+M183ZtMGgOJKiL96brpaz5ekfJCpM= +github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= @@ -442,14 +459,14 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA= -golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20231006140011-7918f672742d 
h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.13.0 h1:I/DsJXRlw/8l/0c24sM9yb0T4z9liZTduXvdAWYiysY= -golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= @@ -457,8 +474,8 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= -golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -479,14 +496,14 @@ golang.org/x/sys 
v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= -golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.14.0 h1:LGK9IlZ8T9jvdy6cTdfKUCltatMFOehAQo9SRC46UQ8= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -507,10 +524,10 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM= -google.golang.org/genproto/googleapis/rpc 
v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= +google.golang.org/grpc v1.60.0 h1:6FQAR0kM31P6MRdeluor2w2gPaS4SVNrD/DNTxrQ15k= +google.golang.org/grpc v1.60.0/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= From 20e5b7f9c36357b7aac986c4c8adc85fc553e28a Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Tue, 19 Dec 2023 10:22:10 -0500 Subject: [PATCH 330/438] r/aws_ssoadmin_application: send application_url only when set (#34967) This fix prevents the portal_options.sign_in_options.application_url argument from being sent on all create and update requests. Now, a value will only be sent when set to a non-nil value. 
--- .changelog/34967.txt | 3 +++ internal/service/ssoadmin/application.go | 7 +++++-- 2 files changed, 8 insertions(+), 2 deletions(-) create mode 100644 .changelog/34967.txt diff --git a/.changelog/34967.txt b/.changelog/34967.txt new file mode 100644 index 00000000000..042b7016d92 --- /dev/null +++ b/.changelog/34967.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_ssoadmin_application: Fix `portal_options.sign_in_options.application_url` triggering `ValidationError` when unset +``` diff --git a/internal/service/ssoadmin/application.go b/internal/service/ssoadmin/application.go index bb220164469..0b54f3f5fea 100644 --- a/internal/service/ssoadmin/application.go +++ b/internal/service/ssoadmin/application.go @@ -500,8 +500,11 @@ func expandSignInOptions(tfList []signInOptionsData) *awstypes.SignInOptions { tfObj := tfList[0] apiObject := &awstypes.SignInOptions{ - ApplicationUrl: aws.String(tfObj.ApplicationURL.ValueString()), - Origin: awstypes.SignInOrigin(tfObj.Origin.ValueString()), + Origin: awstypes.SignInOrigin(tfObj.Origin.ValueString()), + } + + if !tfObj.ApplicationURL.IsNull() { + apiObject.ApplicationUrl = aws.String(tfObj.ApplicationURL.ValueString()) } return apiObject From d0ded6b93776228f496166df6ed206a91dc08942 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 19 Dec 2023 10:22:38 -0500 Subject: [PATCH 331/438] dms: Tweak acceptance test configuration names. 
--- .../service/dms/replication_config_test.go | 57 ++++++++++--------- internal/service/dms/replication_task_test.go | 2 +- 2 files changed, 32 insertions(+), 27 deletions(-) diff --git a/internal/service/dms/replication_config_test.go b/internal/service/dms/replication_config_test.go index fa2a9dfe610..a65f731cd41 100644 --- a/internal/service/dms/replication_config_test.go +++ b/internal/service/dms/replication_config_test.go @@ -30,7 +30,7 @@ func TestAccDMSReplicationConfig_basic(t *testing.T) { CheckDestroy: testAccCheckReplicationConfigDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccReplicationConfig_basic(rName), + Config: testAccReplicationConfigConfig_basic(rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckReplicationConfigExists(ctx, resourceName), resource.TestCheckResourceAttrSet(resourceName, "arn"), @@ -78,7 +78,7 @@ func TestAccDMSReplicationConfig_disappears(t *testing.T) { CheckDestroy: testAccCheckReplicationConfigDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccReplicationConfig_basic(rName), + Config: testAccReplicationConfigConfig_basic(rName), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckReplicationConfigExists(ctx, resourceName), acctest.CheckResourceDisappears(ctx, acctest.Provider, tfdms.ResourceReplicationConfig(), resourceName), @@ -101,7 +101,7 @@ func TestAccDMSReplicationConfig_tags(t *testing.T) { CheckDestroy: testAccCheckReplicationConfigDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccReplicationConfig_tags1(rName, "key1", "value1"), + Config: testAccReplicationConfigConfig_tags1(rName, "key1", "value1"), Check: resource.ComposeTestCheckFunc( testAccCheckReplicationConfigExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), @@ -109,7 +109,7 @@ func TestAccDMSReplicationConfig_tags(t *testing.T) { ), }, { - Config: testAccReplicationConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), + Config: 
testAccReplicationConfigConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), Check: resource.ComposeTestCheckFunc( testAccCheckReplicationConfigExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), @@ -118,7 +118,7 @@ func TestAccDMSReplicationConfig_tags(t *testing.T) { ), }, { - Config: testAccReplicationConfig_tags1(rName, "key2", "value2"), + Config: testAccReplicationConfigConfig_tags1(rName, "key2", "value2"), Check: resource.ComposeTestCheckFunc( testAccCheckReplicationConfigExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), @@ -141,7 +141,7 @@ func TestAccDMSReplicationConfig_update(t *testing.T) { CheckDestroy: testAccCheckReplicationConfigDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccReplicationConfig_update(rName, "cdc", 2, 16), + Config: testAccReplicationConfigConfig_update(rName, "cdc", 2, 16), Check: resource.ComposeTestCheckFunc( testAccCheckReplicationConfigExists(ctx, resourceName), resource.TestCheckResourceAttrSet(resourceName, "arn"), @@ -151,7 +151,7 @@ func TestAccDMSReplicationConfig_update(t *testing.T) { ), }, { - Config: testAccReplicationConfig_update(rName, "cdc", 4, 32), + Config: testAccReplicationConfigConfig_update(rName, "cdc", 4, 32), Check: resource.ComposeTestCheckFunc( testAccCheckReplicationConfigExists(ctx, resourceName), resource.TestCheckResourceAttrSet(resourceName, "arn"), @@ -177,7 +177,7 @@ func TestAccDMSReplicationConfig_startReplication(t *testing.T) { CheckDestroy: testAccCheckReplicationConfigDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccReplicationConfig_startReplication(rName, true), + Config: testAccReplicationConfigConfig_startReplication(rName, true), Check: resource.ComposeTestCheckFunc( testAccCheckReplicationConfigExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "start_replication", "true"), @@ -190,7 +190,7 @@ func TestAccDMSReplicationConfig_startReplication(t *testing.T) 
{ ImportStateVerifyIgnore: []string{"start_replication", "resource_identifier"}, }, { - Config: testAccReplicationConfig_startReplication(rName, false), + Config: testAccReplicationConfigConfig_startReplication(rName, false), Check: resource.ComposeTestCheckFunc( testAccCheckReplicationConfigExists(ctx, resourceName), resource.TestCheckResourceAttr(resourceName, "start_replication", "false"), @@ -241,14 +241,9 @@ func testAccCheckReplicationConfigDestroy(ctx context.Context) resource.TestChec } } -func testAccReplicationConfig_base(rName string) string { +// testAccRDSClustersConfig_base configures a pair of Aurora RDS clusters (and instances) ready for replication. +func testAccRDSClustersConfig_base(rName string) string { return acctest.ConfigCompose(acctest.ConfigVPCWithSubnets(rName, 2), fmt.Sprintf(` -resource "aws_dms_replication_subnet_group" "test" { - replication_subnet_group_id = %[1]q - replication_subnet_group_description = "terraform test" - subnet_ids = aws_subnet.test[*].id -} - resource "aws_db_subnet_group" "test" { name = %[1]q subnet_ids = aws_subnet.test[*].id @@ -353,6 +348,16 @@ resource "aws_rds_cluster_instance" "target" { instance_class = data.aws_rds_orderable_db_instance.test.instance_class db_subnet_group_name = aws_db_subnet_group.test.name } +`, rName)) +} + +func testAccReplicationConfigConfig_base(rName string) string { + return acctest.ConfigCompose(testAccRDSClustersConfig_base(rName), fmt.Sprintf(` +resource "aws_dms_replication_subnet_group" "test" { + replication_subnet_group_id = %[1]q + replication_subnet_group_description = "terraform test" + subnet_ids = aws_subnet.test[*].id +} resource "aws_dms_endpoint" "target" { database_name = "tftest" @@ -378,8 +383,8 @@ resource "aws_dms_endpoint" "source" { `, rName)) } -func testAccReplicationConfig_basic(rName string) string { - return acctest.ConfigCompose(testAccReplicationConfig_base(rName), fmt.Sprintf(` +func testAccReplicationConfigConfig_basic(rName string) string { + 
return acctest.ConfigCompose(testAccReplicationConfigConfig_base(rName), fmt.Sprintf(` resource "aws_dms_replication_config" "test" { replication_config_identifier = %[1]q replication_type = "cdc" @@ -398,8 +403,8 @@ resource "aws_dms_replication_config" "test" { `, rName)) } -func testAccReplicationConfig_update(rName, replicationType string, minCapacity, maxCapacity int) string { - return acctest.ConfigCompose(testAccReplicationConfig_base(rName), fmt.Sprintf(` +func testAccReplicationConfigConfig_update(rName, replicationType string, minCapacity, maxCapacity int) string { + return acctest.ConfigCompose(testAccReplicationConfigConfig_base(rName), fmt.Sprintf(` resource "aws_dms_replication_config" "test" { replication_config_identifier = %[1]q resource_identifier = %[1]q @@ -418,8 +423,8 @@ resource "aws_dms_replication_config" "test" { `, rName, replicationType, maxCapacity, minCapacity)) } -func testAccReplicationConfig_startReplication(rName string, start bool) string { - return acctest.ConfigCompose(testAccReplicationConfig_base(rName), fmt.Sprintf(` +func testAccReplicationConfigConfig_startReplication(rName string, start bool) string { + return acctest.ConfigCompose(testAccReplicationConfigConfig_base(rName), fmt.Sprintf(` resource "aws_dms_replication_config" "test" { replication_config_identifier = %[1]q resource_identifier = %[1]q @@ -440,8 +445,8 @@ resource "aws_dms_replication_config" "test" { `, rName, start)) } -func testAccReplicationConfig_tags1(rName, tagKey1, tagValue1 string) string { - return acctest.ConfigCompose(testAccReplicationConfig_base(rName), fmt.Sprintf(` +func testAccReplicationConfigConfig_tags1(rName, tagKey1, tagValue1 string) string { + return acctest.ConfigCompose(testAccReplicationConfigConfig_base(rName), fmt.Sprintf(` resource "aws_dms_replication_config" "test" { replication_config_identifier = %[1]q replication_type = "cdc" @@ -463,8 +468,8 @@ resource "aws_dms_replication_config" "test" { `, rName, tagKey1, tagValue1)) } 
-func testAccReplicationConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { - return acctest.ConfigCompose(testAccReplicationConfig_base(rName), fmt.Sprintf(` +func testAccReplicationConfigConfig_tags2(rName, tagKey1, tagValue1, tagKey2, tagValue2 string) string { + return acctest.ConfigCompose(testAccReplicationConfigConfig_base(rName), fmt.Sprintf(` resource "aws_dms_replication_config" "test" { replication_config_identifier = %[1]q replication_type = "cdc" diff --git a/internal/service/dms/replication_task_test.go b/internal/service/dms/replication_task_test.go index 00a9aa6764c..404202fa3ed 100644 --- a/internal/service/dms/replication_task_test.go +++ b/internal/service/dms/replication_task_test.go @@ -525,7 +525,7 @@ resource "aws_dms_replication_task" "test" { } func testAccReplicationTaskConfig_start(rName string, startTask bool, ruleName string) string { - return acctest.ConfigCompose(testAccReplicationConfig_base(rName), fmt.Sprintf(` + return acctest.ConfigCompose(testAccReplicationConfigConfig_base(rName), fmt.Sprintf(` resource "aws_dms_replication_instance" "test" { allocated_storage = 5 auto_minor_version_upgrade = true From ba227957595c95ef5f5c2b7a34b5c1c7c65a0210 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Tue, 19 Dec 2023 15:24:49 +0000 Subject: [PATCH 332/438] Update CHANGELOG.md for #34967 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a95d67031c6..2fca56ffd4b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ BUG FIXES: * resource/aws_dynamodb_table: Fix error when waiting for snapshot to be created ([#34848](https://github.com/hashicorp/terraform-provider-aws/issues/34848)) * resource/aws_lb_target_group: Fix diff on `stickiness.cookie_name` when `stickiness.type` is `lb_cookie` ([#31436](https://github.com/hashicorp/terraform-provider-aws/issues/31436)) * resource/aws_memorydb_cluster: Treat `snapshotting` status as pending when creating cluster 
([#31077](https://github.com/hashicorp/terraform-provider-aws/issues/31077)) +* resource/aws_ssoadmin_application: Fix `portal_options.sign_in_options.application_url` triggering `ValidationError` when unset ([#34967](https://github.com/hashicorp/terraform-provider-aws/issues/34967)) ## 5.31.0 (December 15, 2023) From b025e27f718dcc91b763ac2241bee2a2bf2e4648 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 19 Dec 2023 10:43:41 -0500 Subject: [PATCH 333/438] Simplify 'TestAccDMSEndpoint_pauseReplicationTasks'. --- internal/service/dms/endpoint.go | 2 +- internal/service/dms/endpoint_test.go | 164 +++----------------------- 2 files changed, 15 insertions(+), 151 deletions(-) diff --git a/internal/service/dms/endpoint.go b/internal/service/dms/endpoint.go index ca8b596a833..0d7c92a03cc 100644 --- a/internal/service/dms/endpoint.go +++ b/internal/service/dms/endpoint.go @@ -1757,7 +1757,7 @@ func startEndpointReplicationTasks(ctx context.Context, conn *dms.DatabaseMigrat Filters: []*dms.Filter{ { Name: aws.String("endpoint-arn"), - Values: []*string{aws.String(arn)}, + Values: aws.StringSlice([]string{arn}), }, }, }) diff --git a/internal/service/dms/endpoint_test.go b/internal/service/dms/endpoint_test.go index 8c41537fa03..a3b9ede796f 100644 --- a/internal/service/dms/endpoint_test.go +++ b/internal/service/dms/endpoint_test.go @@ -2219,22 +2219,24 @@ func TestAccDMSEndpoint_pauseReplicationTasks(t *testing.T) { CheckDestroy: testAccCheckReplicationTaskDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccEndpointConfig_pauseReplicationTasks(rName, "3306"), + Config: testAccEndpointConfig_pauseReplicationTasks(rName, "source", "target"), Check: resource.ComposeTestCheckFunc( testAccCheckEndpointExists(ctx, endpointNameSource), testAccCheckEndpointExists(ctx, endpointNameTarget), testAccCheckReplicationTaskExists(ctx, replicationTaskName), - resource.TestCheckResourceAttr(endpointNameTarget, "port", "3306"), + 
resource.TestCheckResourceAttr(endpointNameSource, "endpoint_type", "source"), + resource.TestCheckResourceAttr(endpointNameTarget, "endpoint_type", "target"), resource.TestCheckResourceAttr(replicationTaskName, "status", "running"), ), }, { - Config: testAccEndpointConfig_pauseReplicationTasks(rName, "3307"), + Config: testAccEndpointConfig_pauseReplicationTasks(rName, "target", "source"), Check: resource.ComposeTestCheckFunc( testAccCheckEndpointExists(ctx, endpointNameSource), testAccCheckEndpointExists(ctx, endpointNameTarget), testAccCheckReplicationTaskExists(ctx, replicationTaskName), - resource.TestCheckResourceAttr(endpointNameTarget, "port", "3307"), + resource.TestCheckResourceAttr(endpointNameSource, "endpoint_type", "target"), + resource.TestCheckResourceAttr(endpointNameTarget, "endpoint_type", "source"), resource.TestCheckResourceAttr(replicationTaskName, "status", "running"), ), }, @@ -4681,154 +4683,16 @@ resource "aws_kms_key" "test" { `, rName)) } -func testAccEndpointConfig_pauseReplicationTasks(rName, port string) string { - return acctest.ConfigCompose( - acctest.ConfigAvailableAZsNoOptIn(), - fmt.Sprintf(` -data "aws_partition" "current" {} - -data "aws_region" "current" {} - -resource "aws_vpc" "test" { - cidr_block = "10.1.0.0/16" - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test1" { - cidr_block = "10.1.1.0/24" - availability_zone = data.aws_availability_zones.available.names[0] - vpc_id = aws_vpc.test.id - - tags = { - Name = %[1]q - } -} - -resource "aws_subnet" "test2" { - cidr_block = "10.1.2.0/24" - availability_zone = data.aws_availability_zones.available.names[1] - vpc_id = aws_vpc.test.id - - tags = { - Name = "%[1]s-2" - } -} - -resource "aws_security_group" "test" { - vpc_id = aws_vpc.test.id - - ingress { - protocol = -1 - self = true - from_port = 0 - to_port = 0 - } - - egress { - from_port = 0 - to_port = 0 - protocol = "-1" - cidr_blocks = ["0.0.0.0/0"] - } -} - -resource "aws_db_subnet_group" "test" { - name 
= %[1]q - subnet_ids = [aws_subnet.test1.id, aws_subnet.test2.id] - - tags = { - Name = %[1]q - } -} - -data "aws_rds_engine_version" "default" { - engine = "aurora-mysql" -} - -data "aws_rds_orderable_db_instance" "test" { - engine = data.aws_rds_engine_version.default.engine - engine_version = data.aws_rds_engine_version.default.version - preferred_instance_classes = ["db.t3.small", "db.t3.medium", "db.t3.large"] -} - -resource "aws_rds_cluster_parameter_group" "test" { - name = "%[1]s-pg-cluster" - family = data.aws_rds_engine_version.default.parameter_group_family - description = "DMS cluster parameter group" - - parameter { - name = "binlog_format" - value = "ROW" - apply_method = "pending-reboot" - } - - parameter { - name = "binlog_row_image" - value = "Full" - apply_method = "pending-reboot" - } - - parameter { - name = "binlog_checksum" - value = "NONE" - apply_method = "pending-reboot" - } -} - -resource "aws_rds_cluster" "source" { - cluster_identifier = "%[1]s-aurora-cluster-source" - engine = data.aws_rds_orderable_db_instance.test.engine - engine_version = data.aws_rds_orderable_db_instance.test.engine_version - database_name = "tftest" - master_username = "tftest" - master_password = "mustbeeightcharaters" - skip_final_snapshot = true - vpc_security_group_ids = [aws_security_group.test.id] - db_subnet_group_name = aws_db_subnet_group.test.name - db_cluster_parameter_group_name = aws_rds_cluster_parameter_group.test.name -} - -resource "aws_rds_cluster_instance" "source" { - identifier = "%[1]s-source-primary" - cluster_identifier = aws_rds_cluster.source.id - engine = data.aws_rds_orderable_db_instance.test.engine - engine_version = data.aws_rds_orderable_db_instance.test.engine_version - instance_class = data.aws_rds_orderable_db_instance.test.instance_class - db_subnet_group_name = aws_db_subnet_group.test.name -} - -resource "aws_rds_cluster" "target" { - cluster_identifier = "%[1]s-aurora-cluster-target" - engine = 
data.aws_rds_orderable_db_instance.test.engine - engine_version = data.aws_rds_orderable_db_instance.test.engine_version - database_name = "tftest" - master_username = "tftest" - master_password = "mustbeeightcharaters" - skip_final_snapshot = true - vpc_security_group_ids = [aws_security_group.test.id] - db_subnet_group_name = aws_db_subnet_group.test.name -} - -resource "aws_rds_cluster_instance" "target" { - identifier = "%[1]s-target-primary" - cluster_identifier = aws_rds_cluster.target.id - engine = data.aws_rds_orderable_db_instance.test.engine - engine_version = data.aws_rds_orderable_db_instance.test.engine_version - instance_class = data.aws_rds_orderable_db_instance.test.instance_class - db_subnet_group_name = aws_db_subnet_group.test.name -} - +func testAccEndpointConfig_pauseReplicationTasks(rName, type1, type2 string) string { + return acctest.ConfigCompose(testAccRDSClustersConfig_base(rName), fmt.Sprintf(` resource "aws_dms_endpoint" "source" { database_name = "tftest" endpoint_id = "%[1]s-source" - endpoint_type = "source" + endpoint_type = %[2]q engine_name = "aurora" password = "mustbeeightcharaters" pause_replication_tasks = true - port = %[2]s + port = 3306 server_name = aws_rds_cluster.source.endpoint username = "tftest" } @@ -4836,11 +4700,11 @@ resource "aws_dms_endpoint" "source" { resource "aws_dms_endpoint" "target" { database_name = "tftest" endpoint_id = "%[1]s-target" - endpoint_type = "target" + endpoint_type = %[3]q engine_name = "aurora" password = "mustbeeightcharaters" pause_replication_tasks = true - port = %[2]s + port = 3306 server_name = aws_rds_cluster.target.endpoint username = "tftest" } @@ -4848,7 +4712,7 @@ resource "aws_dms_endpoint" "target" { resource "aws_dms_replication_subnet_group" "test" { replication_subnet_group_id = %[1]q replication_subnet_group_description = "terraform test for replication subnet group" - subnet_ids = [aws_subnet.test1.id, aws_subnet.test2.id] + subnet_ids = aws_subnet.test[*].id } resource 
"aws_dms_replication_instance" "test" { @@ -4880,5 +4744,5 @@ resource "aws_dms_replication_task" "test" { depends_on = [aws_rds_cluster_instance.source, aws_rds_cluster_instance.target] } -`, rName, port)) +`, rName, type1, type2)) } From d67bbaea360697a153e1db198852c4d28f31c65f Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 19 Dec 2023 10:52:26 -0500 Subject: [PATCH 334/438] r/aws_s3_bucket: Use Region names from 'names.go' in acceptance tests. --- internal/service/s3/bucket_test.go | 165 ++++++----------------------- internal/service/s3/errors.go | 7 +- 2 files changed, 37 insertions(+), 135 deletions(-) diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index 72553200794..03d27eb9511 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -16,7 +16,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/s3" "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/aws/aws-sdk-go/service/cloudformation" "github.com/aws/aws-sdk-go/service/cloudfront" "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" @@ -407,7 +406,7 @@ func TestAccS3Bucket_Duplicate_basic(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckRegionNot(t, endpoints.UsEast1RegionID) + acctest.PreCheckRegionNot(t, names.USEast1RegionID) }, ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, @@ -427,14 +426,14 @@ func TestAccS3Bucket_Duplicate_UsEast1(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartition(t, endpoints.AwsPartitionID) + acctest.PreCheckPartition(t, names.StandardPartitionID) }, ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: 
testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccBucketConfig_duplicate(endpoints.UsEast1RegionID, bucketName), + Config: testAccBucketConfig_duplicate(names.USEast1RegionID, bucketName), ExpectError: regexache.MustCompile(tfs3.ErrCodeBucketAlreadyExists), }, }, @@ -447,7 +446,7 @@ func TestAccS3Bucket_Duplicate_UsEast1AltAccount(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) - acctest.PreCheckPartition(t, endpoints.AwsPartitionID) + acctest.PreCheckPartition(t, names.StandardPartitionID) acctest.PreCheckAlternateAccount(t) }, ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), @@ -455,7 +454,7 @@ func TestAccS3Bucket_Duplicate_UsEast1AltAccount(t *testing.T) { CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccBucketConfig_duplicateAltAccount(endpoints.UsEast1RegionID, bucketName), + Config: testAccBucketConfig_duplicateAltAccount(names.USEast1RegionID, bucketName), ExpectError: regexache.MustCompile(tfs3.ErrCodeBucketAlreadyExists), }, }, @@ -2304,7 +2303,7 @@ func TestBucketName(t *testing.T) { } for _, v := range validDnsNames { - if err := tfs3.ValidBucketName(v, endpoints.UsWest2RegionID); err != nil { + if err := tfs3.ValidBucketName(v, names.USWest2RegionID); err != nil { t.Fatalf("%q should be a valid S3 bucket name", v) } } @@ -2321,7 +2320,7 @@ func TestBucketName(t *testing.T) { } for _, v := range invalidDnsNames { - if err := tfs3.ValidBucketName(v, endpoints.UsWest2RegionID); err == nil { + if err := tfs3.ValidBucketName(v, names.USWest2RegionID); err == nil { t.Fatalf("%q should not be a valid S3 bucket name", v) } } @@ -2338,7 +2337,7 @@ func TestBucketName(t *testing.T) { } for _, v := range validEastNames { - if err := tfs3.ValidBucketName(v, endpoints.UsEast1RegionID); err != nil { + if err := tfs3.ValidBucketName(v, names.USEast1RegionID); err != nil { t.Fatalf("%q should be a valid S3 bucket name", v) } } @@ 
-2349,7 +2348,7 @@ func TestBucketName(t *testing.T) { } for _, v := range invalidEastNames { - if err := tfs3.ValidBucketName(v, endpoints.UsEast1RegionID); err == nil { + if err := tfs3.ValidBucketName(v, names.USEast1RegionID); err == nil { t.Fatalf("%q should not be a valid S3 bucket name", v) } } @@ -2376,24 +2375,24 @@ func TestBucketRegionalDomainName(t *testing.T) { ExpectedOutput: bucket + ".s3.custom.amazonaws.com", }, { - Region: endpoints.UsEast1RegionID, + Region: names.USEast1RegionID, ExpectedErrCount: 0, - ExpectedOutput: bucket + fmt.Sprintf(".s3.%s.%s", endpoints.UsEast1RegionID, acctest.PartitionDNSSuffix()), + ExpectedOutput: bucket + fmt.Sprintf(".s3.%s.%s", names.USEast1RegionID, acctest.PartitionDNSSuffix()), }, { - Region: endpoints.UsWest2RegionID, + Region: names.USWest2RegionID, ExpectedErrCount: 0, - ExpectedOutput: bucket + fmt.Sprintf(".s3.%s.%s", endpoints.UsWest2RegionID, acctest.PartitionDNSSuffix()), + ExpectedOutput: bucket + fmt.Sprintf(".s3.%s.%s", names.USWest2RegionID, acctest.PartitionDNSSuffix()), }, { - Region: endpoints.UsGovWest1RegionID, + Region: names.USGovWest1RegionID, ExpectedErrCount: 0, - ExpectedOutput: bucket + fmt.Sprintf(".s3.%s.%s", endpoints.UsGovWest1RegionID, acctest.PartitionDNSSuffix()), + ExpectedOutput: bucket + fmt.Sprintf(".s3.%s.%s", names.USGovWest1RegionID, acctest.PartitionDNSSuffix()), }, { - Region: endpoints.CnNorth1RegionID, + Region: names.CNNorth1RegionID, ExpectedErrCount: 0, - ExpectedOutput: bucket + fmt.Sprintf(".s3.%s.amazonaws.com.cn", endpoints.CnNorth1RegionID), + ExpectedOutput: bucket + fmt.Sprintf(".s3.%s.amazonaws.com.cn", names.CNNorth1RegionID), }, } @@ -2423,146 +2422,50 @@ func TestWebsiteEndpoint(t *testing.T) { { TestingClient: &conns.AWSClient{ DNSSuffix: "amazonaws.com", - Region: endpoints.UsEast1RegionID, + Region: names.USEast1RegionID, }, LocationConstraint: "", - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.UsEast1RegionID, 
acctest.PartitionDNSSuffix()), + Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", names.USEast1RegionID, acctest.PartitionDNSSuffix()), }, { TestingClient: &conns.AWSClient{ DNSSuffix: "amazonaws.com", - Region: endpoints.UsWest2RegionID, + Region: names.USEast2RegionID, }, - LocationConstraint: endpoints.UsWest2RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.UsWest2RegionID, acctest.PartitionDNSSuffix()), + LocationConstraint: names.USEast2RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", names.USEast2RegionID, acctest.PartitionDNSSuffix()), }, { TestingClient: &conns.AWSClient{ DNSSuffix: "amazonaws.com", - Region: endpoints.UsWest1RegionID, + Region: names.USGovEast1RegionID, }, - LocationConstraint: endpoints.UsWest1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.UsWest1RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.EuWest1RegionID, - }, - LocationConstraint: endpoints.EuWest1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.EuWest1RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.EuWest3RegionID, - }, - LocationConstraint: endpoints.EuWest3RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.EuWest3RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.EuCentral1RegionID, - }, - LocationConstraint: endpoints.EuCentral1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.EuCentral1RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.ApSouth1RegionID, - }, - LocationConstraint: endpoints.ApSouth1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.ApSouth1RegionID, 
acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.ApSoutheast1RegionID, - }, - LocationConstraint: endpoints.ApSoutheast1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.ApSoutheast1RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.ApNortheast1RegionID, - }, - LocationConstraint: endpoints.ApNortheast1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.ApNortheast1RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.ApSoutheast2RegionID, - }, - LocationConstraint: endpoints.ApSoutheast2RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.ApSoutheast2RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.ApNortheast2RegionID, - }, - LocationConstraint: endpoints.ApNortheast2RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.ApNortheast2RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.SaEast1RegionID, - }, - LocationConstraint: endpoints.SaEast1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.SaEast1RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.UsGovEast1RegionID, - }, - LocationConstraint: endpoints.UsGovEast1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", endpoints.UsGovEast1RegionID, acctest.PartitionDNSSuffix()), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: endpoints.UsGovWest1RegionID, - }, - LocationConstraint: endpoints.UsGovWest1RegionID, - Expected: 
fmt.Sprintf("bucket-name.s3-website-%s.%s", endpoints.UsGovWest1RegionID, acctest.PartitionDNSSuffix()), + LocationConstraint: names.USGovEast1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", names.USGovEast1RegionID, acctest.PartitionDNSSuffix()), }, { TestingClient: &conns.AWSClient{ DNSSuffix: "c2s.ic.gov", - Region: endpoints.UsIsoEast1RegionID, + Region: "us-iso-east-1", }, - LocationConstraint: endpoints.UsIsoEast1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.c2s.ic.gov", endpoints.UsIsoEast1RegionID), + LocationConstraint: "us-iso-east-1", + Expected: fmt.Sprintf("bucket-name.s3-website.%s.c2s.ic.gov", "us-iso-east-1"), }, { TestingClient: &conns.AWSClient{ DNSSuffix: "sc2s.sgov.gov", - Region: endpoints.UsIsobEast1RegionID, - }, - LocationConstraint: endpoints.UsIsobEast1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.sc2s.sgov.gov", endpoints.UsIsobEast1RegionID), - }, - { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com.cn", - Region: endpoints.CnNorthwest1RegionID, + Region: "us-isob-east-1", }, - LocationConstraint: endpoints.CnNorthwest1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.amazonaws.com.cn", endpoints.CnNorthwest1RegionID), + LocationConstraint: "us-isob-east-1", + Expected: fmt.Sprintf("bucket-name.s3-website.%s.sc2s.sgov.gov", "us-isob-east-1"), }, { TestingClient: &conns.AWSClient{ DNSSuffix: "amazonaws.com.cn", - Region: endpoints.CnNorth1RegionID, + Region: names.CNNorth1RegionID, }, - LocationConstraint: endpoints.CnNorth1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.amazonaws.com.cn", endpoints.CnNorth1RegionID), + LocationConstraint: names.CNNorth1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website.%s.amazonaws.com.cn", names.CNNorth1RegionID), }, } diff --git a/internal/service/s3/errors.go b/internal/service/s3/errors.go index 369ce7755af..ce8478e46d1 100644 --- a/internal/service/s3/errors.go +++ b/internal/service/s3/errors.go @@ -32,7 
+32,7 @@ const ( errCodeNoSuchWebsiteConfiguration = "NoSuchWebsiteConfiguration" errCodeNotImplemented = "NotImplemented" // errCodeObjectLockConfigurationNotFound should be used with tfawserr.ErrCodeContains, not tfawserr.ErrCodeEquals. - // Reference: https://github.com/hashicorp/terraform-provider-aws/pull/26317 + // Reference: https://github.com/hashicorp/terraform-provider-aws/pull/26317. errCodeObjectLockConfigurationNotFound = "ObjectLockConfigurationNotFound" errCodeObjectLockConfigurationNotFoundError = "ObjectLockConfigurationNotFoundError" errCodeOperationAborted = "OperationAborted" @@ -40,9 +40,8 @@ const ( errCodeReplicationConfigurationNotFound = "ReplicationConfigurationNotFoundError" errCodeServerSideEncryptionConfigurationNotFound = "ServerSideEncryptionConfigurationNotFoundError" errCodeUnsupportedArgument = "UnsupportedArgument" - // errCodeXNotImplemented is returned from Third Party S3 implementations - // and so far has been noticed with calls to GetBucketWebsite. - // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/14645 + // errCodeXNotImplemented is returned from third-party S3 API implementations. + // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/14645. errCodeXNotImplemented = "XNotImplemented" ) From 955ef7a081561329a430ee1b118c08bf885922b5 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 19 Dec 2023 10:59:21 -0500 Subject: [PATCH 335/438] s3: Remove vestiges of AWS SDK for Go v1. 
--- internal/service/s3/bucket_data_source.go | 2 +- internal/service/s3/bucket_lifecycle_configuration_test.go | 3 +-- internal/service/s3/bucket_public_access_block_test.go | 3 +-- internal/service/s3/bucket_replication_configuration_test.go | 2 +- .../s3/bucket_server_side_encryption_configuration_test.go | 3 +-- internal/service/s3/validate.go | 4 ++-- 6 files changed, 7 insertions(+), 10 deletions(-) diff --git a/internal/service/s3/bucket_data_source.go b/internal/service/s3/bucket_data_source.go index 776e851c94e..0b71228f576 100644 --- a/internal/service/s3/bucket_data_source.go +++ b/internal/service/s3/bucket_data_source.go @@ -8,9 +8,9 @@ import ( "fmt" "log" + "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/feature/s3/manager" "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go/aws/arn" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" diff --git a/internal/service/s3/bucket_lifecycle_configuration_test.go b/internal/service/s3/bucket_lifecycle_configuration_test.go index 515dab961ef..66efd2c01b8 100644 --- a/internal/service/s3/bucket_lifecycle_configuration_test.go +++ b/internal/service/s3/bucket_lifecycle_configuration_test.go @@ -11,7 +11,6 @@ import ( "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/aws-sdk-go/service/s3" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -1036,7 +1035,7 @@ func TestAccS3BucketLifecycleConfiguration_directoryBucket(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketLifecycleConfigurationDestroy(ctx), Steps: []resource.TestStep{ diff --git a/internal/service/s3/bucket_public_access_block_test.go b/internal/service/s3/bucket_public_access_block_test.go index 5ca0991b148..6d6e23d4ef2 100644 --- a/internal/service/s3/bucket_public_access_block_test.go +++ b/internal/service/s3/bucket_public_access_block_test.go @@ -10,7 +10,6 @@ import ( "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/aws-sdk-go/service/s3" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -277,7 +276,7 @@ func TestAccS3BucketPublicAccessBlock_directoryBucket(t *testing.T) { resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { acctest.PreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ diff --git a/internal/service/s3/bucket_replication_configuration_test.go b/internal/service/s3/bucket_replication_configuration_test.go index 58fd6535291..ea865eb2077 100644 --- a/internal/service/s3/bucket_replication_configuration_test.go +++ b/internal/service/s3/bucket_replication_configuration_test.go @@ -1190,7 +1190,7 @@ func TestAccS3BucketReplicationConfiguration_directoryBucket(t *testing.T) { acctest.PreCheck(ctx, t) acctest.PreCheckMultipleRegion(t, 2) }, - ErrorCheck: acctest.ErrorCheck(t, s3.EndpointsID), + ErrorCheck: acctest.ErrorCheck(t, names.S3EndpointID), ProtoV5ProviderFactories: acctest.ProtoV5FactoriesPlusProvidersAlternate(ctx, t, &providers), CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), Steps: 
[]resource.TestStep{ diff --git a/internal/service/s3/bucket_server_side_encryption_configuration_test.go b/internal/service/s3/bucket_server_side_encryption_configuration_test.go index 01fb94e672c..e6545593acd 100644 --- a/internal/service/s3/bucket_server_side_encryption_configuration_test.go +++ b/internal/service/s3/bucket_server_side_encryption_configuration_test.go @@ -10,7 +10,6 @@ import ( "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/aws-sdk-go/service/s3" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" "github.com/hashicorp/terraform-plugin-testing/terraform" @@ -349,7 +348,7 @@ func TestAccS3BucketServerSideEncryptionConfiguration_migrate_noChange(t *testin CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ { - Config: testAccBucketConfig_defaultEncryptionDefaultKey(rName, s3.ServerSideEncryptionAwsKms), + Config: testAccBucketConfig_defaultEncryptionDefaultKey(rName, string(types.ServerSideEncryptionAwsKms)), Check: resource.ComposeAggregateTestCheckFunc( testAccCheckBucketExists(ctx, bucketResourceName), resource.TestCheckResourceAttr(bucketResourceName, "server_side_encryption_configuration.#", "1"), diff --git a/internal/service/s3/validate.go b/internal/service/s3/validate.go index f659d03f77d..9eab70125f8 100644 --- a/internal/service/s3/validate.go +++ b/internal/service/s3/validate.go @@ -9,14 +9,14 @@ import ( "time" "github.com/YakDriver/regexache" - "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-provider-aws/names" ) // ValidBucketName validates any S3 bucket name that is not inside the us-east-1 region. // Buckets outside of this region have to be DNS-compliant. 
After the same restrictions are // applied to buckets in the us-east-1 region, this function can be refactored as a SchemaValidateFunc func ValidBucketName(value string, region string) error { - if region != endpoints.UsEast1RegionID { + if region != names.USEast1RegionID { if (len(value) < 3) || (len(value) > 63) { return fmt.Errorf("%q must contain from 3 to 63 characters", value) } From cd1f39ee74cffe82b4247b5e85a94969ad300859 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 19 Dec 2023 11:15:24 -0500 Subject: [PATCH 336/438] Use Region names from 'names.go' for S3 website hosted zone ID. --- internal/service/s3/hosted_zones.go | 72 ++++++++++++++--------------- 1 file changed, 36 insertions(+), 36 deletions(-) diff --git a/internal/service/s3/hosted_zones.go b/internal/service/s3/hosted_zones.go index 76b5c5c0c1c..c5fc5852986 100644 --- a/internal/service/s3/hosted_zones.go +++ b/internal/service/s3/hosted_zones.go @@ -6,50 +6,50 @@ package s3 import ( "fmt" - "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/hashicorp/terraform-provider-aws/names" ) // See https://docs.aws.amazon.com/general/latest/gr/s3.html#s3_website_region_endpoints. 
var hostedZoneIDsMap = map[string]string{ - endpoints.AfSouth1RegionID: "Z83WF9RJE8B12", - endpoints.ApEast1RegionID: "ZNB98KWMFR0R6", - endpoints.ApNortheast1RegionID: "Z2M4EHUR26P7ZW", - endpoints.ApNortheast2RegionID: "Z3W03O7B5YMIYP", - endpoints.ApNortheast3RegionID: "Z2YQB5RD63NC85", - endpoints.ApSouth1RegionID: "Z11RGJOFQNVJUP", - endpoints.ApSouth2RegionID: "Z02976202B4EZMXIPMXF7", - endpoints.ApSoutheast1RegionID: "Z3O0J2DXBE1FTB", - endpoints.ApSoutheast2RegionID: "Z1WCIGYICN2BYD", - endpoints.ApSoutheast3RegionID: "Z01846753K324LI26A3VV", - endpoints.ApSoutheast4RegionID: "Z0312387243XT5FE14WFO", - endpoints.CaCentral1RegionID: "Z1QDHH18159H29", - endpoints.CnNorth1RegionID: "Z5CN8UMXT92WN", - endpoints.CnNorthwest1RegionID: "Z282HJ1KT0DH03", - endpoints.EuCentral1RegionID: "Z21DNDUVLTQW6Q", - endpoints.EuCentral2RegionID: "Z030506016YDQGETNASS", - endpoints.EuNorth1RegionID: "Z3BAZG2TWCNX0D", - endpoints.EuSouth1RegionID: "Z30OZKI7KPW7MI", - endpoints.EuSouth2RegionID: "Z0081959F7139GRJC19J", - endpoints.EuWest1RegionID: "Z1BKCTXD74EZPE", - endpoints.EuWest2RegionID: "Z3GKZC51ZF0DB4", - endpoints.EuWest3RegionID: "Z3R1K369G5AVDG", - endpoints.IlCentral1RegionID: "Z09640613K4A3MN55U7GU", - endpoints.MeCentral1RegionID: "Z06143092I8HRXZRUZROF", - endpoints.MeSouth1RegionID: "Z1MPMWCPA7YB62", - endpoints.SaEast1RegionID: "Z7KQH4QJS55SO", - endpoints.UsEast1RegionID: "Z3AQBSTGFYJSTF", - endpoints.UsEast2RegionID: "Z2O1EMRO9K5GLX", - endpoints.UsGovEast1RegionID: "Z2NIFVYYW2VKV1", - endpoints.UsGovWest1RegionID: "Z31GFT0UA1I2HV", - endpoints.UsWest1RegionID: "Z2F56UZL2M1ACD", - endpoints.UsWest2RegionID: "Z3BJ6K6RIION7M", + names.AFSouth1RegionID: "Z83WF9RJE8B12", + names.APEast1RegionID: "ZNB98KWMFR0R6", + names.APNortheast1RegionID: "Z2M4EHUR26P7ZW", + names.APNortheast2RegionID: "Z3W03O7B5YMIYP", + names.APNortheast3RegionID: "Z2YQB5RD63NC85", + names.APSouth1RegionID: "Z11RGJOFQNVJUP", + names.APSouth2RegionID: "Z02976202B4EZMXIPMXF7", + 
names.APSoutheast1RegionID: "Z3O0J2DXBE1FTB", + names.APSoutheast2RegionID: "Z1WCIGYICN2BYD", + names.APSoutheast3RegionID: "Z01846753K324LI26A3VV", + names.APSoutheast4RegionID: "Z0312387243XT5FE14WFO", + names.CACentral1RegionID: "Z1QDHH18159H29", + names.CNNorth1RegionID: "Z5CN8UMXT92WN", + names.CNNorthwest1RegionID: "Z282HJ1KT0DH03", + names.EUCentral1RegionID: "Z21DNDUVLTQW6Q", + names.EUCentral2RegionID: "Z030506016YDQGETNASS", + names.EUNorth1RegionID: "Z3BAZG2TWCNX0D", + names.EUSouth1RegionID: "Z30OZKI7KPW7MI", + names.EUSouth2RegionID: "Z0081959F7139GRJC19J", + names.EUWest1RegionID: "Z1BKCTXD74EZPE", + names.EUWest2RegionID: "Z3GKZC51ZF0DB4", + names.EUWest3RegionID: "Z3R1K369G5AVDG", + names.ILCentral1RegionID: "Z09640613K4A3MN55U7GU", + names.MECentral1RegionID: "Z06143092I8HRXZRUZROF", + names.MESouth1RegionID: "Z1MPMWCPA7YB62", + names.SAEast1RegionID: "Z7KQH4QJS55SO", + names.USEast1RegionID: "Z3AQBSTGFYJSTF", + names.USEast2RegionID: "Z2O1EMRO9K5GLX", + names.USGovEast1RegionID: "Z2NIFVYYW2VKV1", + names.USGovWest1RegionID: "Z31GFT0UA1I2HV", + names.USWest1RegionID: "Z2F56UZL2M1ACD", + names.USWest2RegionID: "Z3BJ6K6RIION7M", } -// Returns the hosted zone ID for an S3 website endpoint region. This can be -// used as input to the aws_route53_record resource's zone_id argument. +// Returns the hosted zone ID for an S3 website endpoint Region. +// This can be used as input to the aws_route53_record resource's zone_id argument. 
func HostedZoneIDForRegion(region string) (string, error) { if v, ok := hostedZoneIDsMap[region]; ok { return v, nil } - return "", fmt.Errorf("S3 hosted zone ID not found for region: %s", region) + return "", fmt.Errorf("S3 website Route 53 hosted zone ID not found for Region (%s)", region) } From 7b5a7a02865ca83a7c45ba860961c56d75539f29 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Tue, 19 Dec 2023 11:26:41 -0500 Subject: [PATCH 337/438] docs: fix synthetic arn construction example (#34991) --- docs/raising-a-pull-request.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/raising-a-pull-request.md b/docs/raising-a-pull-request.md index ea62da2a5bb..dd296b4576c 100644 --- a/docs/raising-a-pull-request.md +++ b/docs/raising-a-pull-request.md @@ -150,14 +150,14 @@ The below are style-based items that _may_ be noted during review and are recomm ```go // Direct Connect Virtual Interface ARN. // See https://docs.aws.amazon.com/directconnect/latest/UserGuide/security_iam_service-with-iam.html#security_iam_service-with-iam-id-based-policies-resources. - arn := arn.ARN{ - Partition: meta.(*AWSClient).partition, - Region: meta.(*AWSClient).region, - Service: "directconnect", - AccountID: meta.(*AWSClient).accountid, - Resource: fmt.Sprintf("dxvif/%s", d.Id()), - }.String() - d.Set("arn", arn) + arn := arn.ARN{ + Partition: meta.(*conns.AWSClient).Partition, + Region: meta.(*conns.AWSClient).Region, + Service: "directconnect", + AccountID: meta.(*conns.AWSClient).AccountID, + Resource: fmt.Sprintf("dxvif/%s", d.Id()), + }.String() + d.Set("arn", arn) ``` When the `arn` attribute is synthesized this way, add the resource to the [list](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#skip_requesting_account_id) of those affected by the provider's `skip_requesting_account_id` attribute. 
From a50d08e70cd0269ef1dac1bd5187336d3f5962a5 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 19 Dec 2023 12:21:50 -0500 Subject: [PATCH 338/438] Simplify 'TestAccDMSEndpoint_pauseReplicationTasks' again. --- internal/service/dms/endpoint.go | 593 +++++++++++++------------- internal/service/dms/endpoint_test.go | 20 +- 2 files changed, 308 insertions(+), 305 deletions(-) diff --git a/internal/service/dms/endpoint.go b/internal/service/dms/endpoint.go index 0d7c92a03cc..3492080b999 100644 --- a/internal/service/dms/endpoint.go +++ b/internal/service/dms/endpoint.go @@ -1057,351 +1057,358 @@ func resourceEndpointUpdate(ctx context.Context, d *schema.ResourceData, meta in conn := meta.(*conns.AWSClient).DMSConn(ctx) if d.HasChangesExcept("tags", "tags_all") { - input := &dms.ModifyEndpointInput{ - EndpointArn: aws.String(d.Get("endpoint_arn").(string)), - } - - if d.HasChange("certificate_arn") { - input.CertificateArn = aws.String(d.Get("certificate_arn").(string)) - } - - if d.HasChange("endpoint_type") { - input.EndpointType = aws.String(d.Get("endpoint_type").(string)) - } + endpointARN := d.Get("endpoint_arn").(string) + pauseTasks := d.Get("pause_replication_tasks").(bool) + var tasks []*dms.ReplicationTask - if d.HasChange("engine_name") { - input.EngineName = aws.String(d.Get("engine_name").(string)) - } + if pauseTasks { + var err error + tasks, err = stopEndpointReplicationTasks(ctx, conn, endpointARN) - if d.HasChange("extra_connection_attributes") { - input.ExtraConnectionAttributes = aws.String(d.Get("extra_connection_attributes").(string)) + if err != nil { + return sdkdiag.AppendErrorf(diags, "stopping replication tasks before updating DMS Endpoint (%s): %s", d.Id(), err) + } } - if d.HasChange("service_access_role") { - input.DynamoDbSettings = &dms.DynamoDbSettings{ - ServiceAccessRoleArn: aws.String(d.Get("service_access_role").(string)), + if d.HasChangesExcept("pause_replication_tasks") { + input := &dms.ModifyEndpointInput{ + EndpointArn: 
aws.String(endpointARN), } - } - if d.HasChange("ssl_mode") { - input.SslMode = aws.String(d.Get("ssl_mode").(string)) - } + if d.HasChange("certificate_arn") { + input.CertificateArn = aws.String(d.Get("certificate_arn").(string)) + } - switch engineName := d.Get("engine_name").(string); engineName { - case engineNameAurora, engineNameMariadb, engineNameMySQL: - if d.HasChanges( - "username", "password", "server_name", "port", "database_name", "secrets_manager_access_role_arn", - "secrets_manager_arn") { - if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.MySQLSettings = &dms.MySQLSettings{ - SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), - SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), - } - } else { - input.MySQLSettings = &dms.MySQLSettings{ - Username: aws.String(d.Get("username").(string)), - Password: aws.String(d.Get("password").(string)), - ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get("port").(int))), - DatabaseName: aws.String(d.Get("database_name").(string)), - } - input.EngineName = aws.String(engineName) + if d.HasChange("endpoint_type") { + input.EndpointType = aws.String(d.Get("endpoint_type").(string)) + } - // Update connection info in top-level namespace as well - expandTopLevelConnectionInfoModify(d, input) - } + if d.HasChange("engine_name") { + input.EngineName = aws.String(d.Get("engine_name").(string)) } - case engineNameAuroraPostgresql, engineNamePostgres: - if d.HasChanges( - "username", "password", "server_name", "port", "database_name", "secrets_manager_access_role_arn", - "secrets_manager_arn") { - if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.PostgreSQLSettings = &dms.PostgreSQLSettings{ - DatabaseName: aws.String(d.Get("database_name").(string)), - SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), - SecretsManagerSecretId: 
aws.String(d.Get("secrets_manager_arn").(string)), - } - } else { - input.PostgreSQLSettings = &dms.PostgreSQLSettings{ - Username: aws.String(d.Get("username").(string)), - Password: aws.String(d.Get("password").(string)), - ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get("port").(int))), - DatabaseName: aws.String(d.Get("database_name").(string)), - } - input.EngineName = aws.String(engineName) // Must be included (should be 'postgres') - // Update connection info in top-level namespace as well - expandTopLevelConnectionInfoModify(d, input) - } + if d.HasChange("extra_connection_attributes") { + input.ExtraConnectionAttributes = aws.String(d.Get("extra_connection_attributes").(string)) } - case engineNameDynamoDB: + if d.HasChange("service_access_role") { input.DynamoDbSettings = &dms.DynamoDbSettings{ ServiceAccessRoleArn: aws.String(d.Get("service_access_role").(string)), } } - case engineNameElasticsearch, engineNameOpenSearch: - if d.HasChanges( - "elasticsearch_settings.0.endpoint_uri", - "elasticsearch_settings.0.error_retry_duration", - "elasticsearch_settings.0.full_load_error_percentage", - "elasticsearch_settings.0.service_access_role_arn", - "elasticsearch_settings.0.use_new_mapping_type") { - input.ElasticsearchSettings = &dms.ElasticsearchSettings{ - ServiceAccessRoleArn: aws.String(d.Get("elasticsearch_settings.0.service_access_role_arn").(string)), - EndpointUri: aws.String(d.Get("elasticsearch_settings.0.endpoint_uri").(string)), - ErrorRetryDuration: aws.Int64(int64(d.Get("elasticsearch_settings.0.error_retry_duration").(int))), - FullLoadErrorPercentage: aws.Int64(int64(d.Get("elasticsearch_settings.0.full_load_error_percentage").(int))), - UseNewMappingType: aws.Bool(d.Get("elasticsearch_settings.0.use_new_mapping_type").(bool)), - } - input.EngineName = aws.String(engineName) - } - case engineNameKafka: - if d.HasChange("kafka_settings") { - input.KafkaSettings = 
expandKafkaSettings(d.Get("kafka_settings").([]interface{})[0].(map[string]interface{})) - input.EngineName = aws.String(engineName) - } - case engineNameKinesis: - if d.HasChanges("kinesis_settings") { - input.KinesisSettings = expandKinesisSettings(d.Get("kinesis_settings").([]interface{})[0].(map[string]interface{})) - input.EngineName = aws.String(engineName) + + if d.HasChange("ssl_mode") { + input.SslMode = aws.String(d.Get("ssl_mode").(string)) } - case engineNameMongodb: - if d.HasChanges( - "username", "password", "server_name", "port", "database_name", "mongodb_settings.0.auth_type", - "mongodb_settings.0.auth_mechanism", "mongodb_settings.0.nesting_level", "mongodb_settings.0.extract_doc_id", - "mongodb_settings.0.docs_to_investigate", "mongodb_settings.0.auth_source", "secrets_manager_access_role_arn", - "secrets_manager_arn") { - if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.MongoDbSettings = &dms.MongoDbSettings{ - SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), - SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), - DatabaseName: aws.String(d.Get("database_name").(string)), - KmsKeyId: aws.String(d.Get("kms_key_arn").(string)), - - AuthType: aws.String(d.Get("mongodb_settings.0.auth_type").(string)), - AuthMechanism: aws.String(d.Get("mongodb_settings.0.auth_mechanism").(string)), - NestingLevel: aws.String(d.Get("mongodb_settings.0.nesting_level").(string)), - ExtractDocId: aws.String(d.Get("mongodb_settings.0.extract_doc_id").(string)), - DocsToInvestigate: aws.String(d.Get("mongodb_settings.0.docs_to_investigate").(string)), - AuthSource: aws.String(d.Get("mongodb_settings.0.auth_source").(string)), - } - } else { - input.MongoDbSettings = &dms.MongoDbSettings{ - Username: aws.String(d.Get("username").(string)), - Password: aws.String(d.Get("password").(string)), - ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get("port").(int))), - 
DatabaseName: aws.String(d.Get("database_name").(string)), - KmsKeyId: aws.String(d.Get("kms_key_arn").(string)), - - AuthType: aws.String(d.Get("mongodb_settings.0.auth_type").(string)), - AuthMechanism: aws.String(d.Get("mongodb_settings.0.auth_mechanism").(string)), - NestingLevel: aws.String(d.Get("mongodb_settings.0.nesting_level").(string)), - ExtractDocId: aws.String(d.Get("mongodb_settings.0.extract_doc_id").(string)), - DocsToInvestigate: aws.String(d.Get("mongodb_settings.0.docs_to_investigate").(string)), - AuthSource: aws.String(d.Get("mongodb_settings.0.auth_source").(string)), + + switch engineName := d.Get("engine_name").(string); engineName { + case engineNameAurora, engineNameMariadb, engineNameMySQL: + if d.HasChanges( + "username", "password", "server_name", "port", "database_name", "secrets_manager_access_role_arn", + "secrets_manager_arn") { + if _, ok := d.GetOk("secrets_manager_arn"); ok { + input.MySQLSettings = &dms.MySQLSettings{ + SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), + SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), + } + } else { + input.MySQLSettings = &dms.MySQLSettings{ + Username: aws.String(d.Get("username").(string)), + Password: aws.String(d.Get("password").(string)), + ServerName: aws.String(d.Get("server_name").(string)), + Port: aws.Int64(int64(d.Get("port").(int))), + DatabaseName: aws.String(d.Get("database_name").(string)), + } + input.EngineName = aws.String(engineName) + + // Update connection info in top-level namespace as well + expandTopLevelConnectionInfoModify(d, input) } - input.EngineName = aws.String(engineName) + } + case engineNameAuroraPostgresql, engineNamePostgres: + if d.HasChanges( + "username", "password", "server_name", "port", "database_name", "secrets_manager_access_role_arn", + "secrets_manager_arn") { + if _, ok := d.GetOk("secrets_manager_arn"); ok { + input.PostgreSQLSettings = &dms.PostgreSQLSettings{ + DatabaseName: 
aws.String(d.Get("database_name").(string)), + SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), + SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), + } + } else { + input.PostgreSQLSettings = &dms.PostgreSQLSettings{ + Username: aws.String(d.Get("username").(string)), + Password: aws.String(d.Get("password").(string)), + ServerName: aws.String(d.Get("server_name").(string)), + Port: aws.Int64(int64(d.Get("port").(int))), + DatabaseName: aws.String(d.Get("database_name").(string)), + } + input.EngineName = aws.String(engineName) // Must be included (should be 'postgres') - // Update connection info in top-level namespace as well - expandTopLevelConnectionInfoModify(d, input) + // Update connection info in top-level namespace as well + expandTopLevelConnectionInfoModify(d, input) + } } - } - case engineNameOracle: - if d.HasChanges( - "username", "password", "server_name", "port", "database_name", "secrets_manager_access_role_arn", - "secrets_manager_arn") { - if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.OracleSettings = &dms.OracleSettings{ - DatabaseName: aws.String(d.Get("database_name").(string)), - SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), - SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), + case engineNameDynamoDB: + if d.HasChange("service_access_role") { + input.DynamoDbSettings = &dms.DynamoDbSettings{ + ServiceAccessRoleArn: aws.String(d.Get("service_access_role").(string)), } - } else { - input.OracleSettings = &dms.OracleSettings{ - Username: aws.String(d.Get("username").(string)), - Password: aws.String(d.Get("password").(string)), - ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get("port").(int))), - DatabaseName: aws.String(d.Get("database_name").(string)), + } + case engineNameElasticsearch, engineNameOpenSearch: + if d.HasChanges( + 
"elasticsearch_settings.0.endpoint_uri", + "elasticsearch_settings.0.error_retry_duration", + "elasticsearch_settings.0.full_load_error_percentage", + "elasticsearch_settings.0.service_access_role_arn", + "elasticsearch_settings.0.use_new_mapping_type") { + input.ElasticsearchSettings = &dms.ElasticsearchSettings{ + ServiceAccessRoleArn: aws.String(d.Get("elasticsearch_settings.0.service_access_role_arn").(string)), + EndpointUri: aws.String(d.Get("elasticsearch_settings.0.endpoint_uri").(string)), + ErrorRetryDuration: aws.Int64(int64(d.Get("elasticsearch_settings.0.error_retry_duration").(int))), + FullLoadErrorPercentage: aws.Int64(int64(d.Get("elasticsearch_settings.0.full_load_error_percentage").(int))), + UseNewMappingType: aws.Bool(d.Get("elasticsearch_settings.0.use_new_mapping_type").(bool)), } - input.EngineName = aws.String(engineName) // Must be included (should be 'oracle') - - // Update connection info in top-level namespace as well - expandTopLevelConnectionInfoModify(d, input) + input.EngineName = aws.String(engineName) } - } - case engineNameRedis: - if d.HasChanges("redis_settings") { - input.RedisSettings = expandRedisSettings(d.Get("redis_settings").([]interface{})[0].(map[string]interface{})) - input.EngineName = aws.String(engineName) - } - case engineNameRedshift: - if d.HasChanges( - "username", "password", "server_name", "port", "database_name", - "redshift_settings", "secrets_manager_access_role_arn", - "secrets_manager_arn") { - if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.RedshiftSettings = &dms.RedshiftSettings{ - DatabaseName: aws.String(d.Get("database_name").(string)), - SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), - SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), + case engineNameKafka: + if d.HasChange("kafka_settings") { + input.KafkaSettings = expandKafkaSettings(d.Get("kafka_settings").([]interface{})[0].(map[string]interface{})) + 
input.EngineName = aws.String(engineName) + } + case engineNameKinesis: + if d.HasChanges("kinesis_settings") { + input.KinesisSettings = expandKinesisSettings(d.Get("kinesis_settings").([]interface{})[0].(map[string]interface{})) + input.EngineName = aws.String(engineName) + } + case engineNameMongodb: + if d.HasChanges( + "username", "password", "server_name", "port", "database_name", "mongodb_settings.0.auth_type", + "mongodb_settings.0.auth_mechanism", "mongodb_settings.0.nesting_level", "mongodb_settings.0.extract_doc_id", + "mongodb_settings.0.docs_to_investigate", "mongodb_settings.0.auth_source", "secrets_manager_access_role_arn", + "secrets_manager_arn") { + if _, ok := d.GetOk("secrets_manager_arn"); ok { + input.MongoDbSettings = &dms.MongoDbSettings{ + SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), + SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), + DatabaseName: aws.String(d.Get("database_name").(string)), + KmsKeyId: aws.String(d.Get("kms_key_arn").(string)), + + AuthType: aws.String(d.Get("mongodb_settings.0.auth_type").(string)), + AuthMechanism: aws.String(d.Get("mongodb_settings.0.auth_mechanism").(string)), + NestingLevel: aws.String(d.Get("mongodb_settings.0.nesting_level").(string)), + ExtractDocId: aws.String(d.Get("mongodb_settings.0.extract_doc_id").(string)), + DocsToInvestigate: aws.String(d.Get("mongodb_settings.0.docs_to_investigate").(string)), + AuthSource: aws.String(d.Get("mongodb_settings.0.auth_source").(string)), + } + } else { + input.MongoDbSettings = &dms.MongoDbSettings{ + Username: aws.String(d.Get("username").(string)), + Password: aws.String(d.Get("password").(string)), + ServerName: aws.String(d.Get("server_name").(string)), + Port: aws.Int64(int64(d.Get("port").(int))), + DatabaseName: aws.String(d.Get("database_name").(string)), + KmsKeyId: aws.String(d.Get("kms_key_arn").(string)), + + AuthType: 
aws.String(d.Get("mongodb_settings.0.auth_type").(string)), + AuthMechanism: aws.String(d.Get("mongodb_settings.0.auth_mechanism").(string)), + NestingLevel: aws.String(d.Get("mongodb_settings.0.nesting_level").(string)), + ExtractDocId: aws.String(d.Get("mongodb_settings.0.extract_doc_id").(string)), + DocsToInvestigate: aws.String(d.Get("mongodb_settings.0.docs_to_investigate").(string)), + AuthSource: aws.String(d.Get("mongodb_settings.0.auth_source").(string)), + } + input.EngineName = aws.String(engineName) + + // Update connection info in top-level namespace as well + expandTopLevelConnectionInfoModify(d, input) } - } else { - input.RedshiftSettings = &dms.RedshiftSettings{ - Username: aws.String(d.Get("username").(string)), - Password: aws.String(d.Get("password").(string)), - ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get("port").(int))), - DatabaseName: aws.String(d.Get("database_name").(string)), + } + case engineNameOracle: + if d.HasChanges( + "username", "password", "server_name", "port", "database_name", "secrets_manager_access_role_arn", + "secrets_manager_arn") { + if _, ok := d.GetOk("secrets_manager_arn"); ok { + input.OracleSettings = &dms.OracleSettings{ + DatabaseName: aws.String(d.Get("database_name").(string)), + SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), + SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), + } + } else { + input.OracleSettings = &dms.OracleSettings{ + Username: aws.String(d.Get("username").(string)), + Password: aws.String(d.Get("password").(string)), + ServerName: aws.String(d.Get("server_name").(string)), + Port: aws.Int64(int64(d.Get("port").(int))), + DatabaseName: aws.String(d.Get("database_name").(string)), + } + input.EngineName = aws.String(engineName) // Must be included (should be 'oracle') + + // Update connection info in top-level namespace as well + expandTopLevelConnectionInfoModify(d, input) } - 
input.EngineName = aws.String(engineName) // Must be included (should be 'redshift') + } + case engineNameRedis: + if d.HasChanges("redis_settings") { + input.RedisSettings = expandRedisSettings(d.Get("redis_settings").([]interface{})[0].(map[string]interface{})) + input.EngineName = aws.String(engineName) + } + case engineNameRedshift: + if d.HasChanges( + "username", "password", "server_name", "port", "database_name", + "redshift_settings", "secrets_manager_access_role_arn", + "secrets_manager_arn") { + if _, ok := d.GetOk("secrets_manager_arn"); ok { + input.RedshiftSettings = &dms.RedshiftSettings{ + DatabaseName: aws.String(d.Get("database_name").(string)), + SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), + SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), + } + } else { + input.RedshiftSettings = &dms.RedshiftSettings{ + Username: aws.String(d.Get("username").(string)), + Password: aws.String(d.Get("password").(string)), + ServerName: aws.String(d.Get("server_name").(string)), + Port: aws.Int64(int64(d.Get("port").(int))), + DatabaseName: aws.String(d.Get("database_name").(string)), + } + input.EngineName = aws.String(engineName) // Must be included (should be 'redshift') - // Update connection info in top-level namespace as well - expandTopLevelConnectionInfoModify(d, input) + // Update connection info in top-level namespace as well + expandTopLevelConnectionInfoModify(d, input) - if v, ok := d.GetOk("redshift_settings"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - tfMap := v.([]interface{})[0].(map[string]interface{}) + if v, ok := d.GetOk("redshift_settings"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + tfMap := v.([]interface{})[0].(map[string]interface{}) - if v, ok := tfMap["bucket_folder"].(string); ok && v != "" { - input.RedshiftSettings.BucketFolder = aws.String(v) - } + if v, ok := tfMap["bucket_folder"].(string); ok && v != "" { + 
input.RedshiftSettings.BucketFolder = aws.String(v) + } - if v, ok := tfMap["bucket_name"].(string); ok && v != "" { - input.RedshiftSettings.BucketName = aws.String(v) - } + if v, ok := tfMap["bucket_name"].(string); ok && v != "" { + input.RedshiftSettings.BucketName = aws.String(v) + } - if v, ok := tfMap["encryption_mode"].(string); ok && v != "" { - input.RedshiftSettings.EncryptionMode = aws.String(v) - } + if v, ok := tfMap["encryption_mode"].(string); ok && v != "" { + input.RedshiftSettings.EncryptionMode = aws.String(v) + } - if v, ok := tfMap["server_side_encryption_kms_key_id"].(string); ok && v != "" { - input.RedshiftSettings.ServerSideEncryptionKmsKeyId = aws.String(v) - } + if v, ok := tfMap["server_side_encryption_kms_key_id"].(string); ok && v != "" { + input.RedshiftSettings.ServerSideEncryptionKmsKeyId = aws.String(v) + } - if v, ok := tfMap["service_access_role_arn"].(string); ok && v != "" { - input.RedshiftSettings.ServiceAccessRoleArn = aws.String(v) + if v, ok := tfMap["service_access_role_arn"].(string); ok && v != "" { + input.RedshiftSettings.ServiceAccessRoleArn = aws.String(v) + } } } } - } - case engineNameSQLServer, engineNameBabelfish: - if d.HasChanges( - "username", "password", "server_name", "port", "database_name", "secrets_manager_access_role_arn", - "secrets_manager_arn") { - if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.MicrosoftSQLServerSettings = &dms.MicrosoftSQLServerSettings{ - DatabaseName: aws.String(d.Get("database_name").(string)), - SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), - SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), - } - } else { - input.MicrosoftSQLServerSettings = &dms.MicrosoftSQLServerSettings{ - Username: aws.String(d.Get("username").(string)), - Password: aws.String(d.Get("password").(string)), - ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get("port").(int))), - DatabaseName: 
aws.String(d.Get("database_name").(string)), - } - input.EngineName = aws.String(engineName) // Must be included (should be 'postgres') + case engineNameSQLServer, engineNameBabelfish: + if d.HasChanges( + "username", "password", "server_name", "port", "database_name", "secrets_manager_access_role_arn", + "secrets_manager_arn") { + if _, ok := d.GetOk("secrets_manager_arn"); ok { + input.MicrosoftSQLServerSettings = &dms.MicrosoftSQLServerSettings{ + DatabaseName: aws.String(d.Get("database_name").(string)), + SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), + SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), + } + } else { + input.MicrosoftSQLServerSettings = &dms.MicrosoftSQLServerSettings{ + Username: aws.String(d.Get("username").(string)), + Password: aws.String(d.Get("password").(string)), + ServerName: aws.String(d.Get("server_name").(string)), + Port: aws.Int64(int64(d.Get("port").(int))), + DatabaseName: aws.String(d.Get("database_name").(string)), + } + input.EngineName = aws.String(engineName) // Must be included (should be 'postgres') - // Update connection info in top-level namespace as well - expandTopLevelConnectionInfoModify(d, input) - } - } - case engineNameSybase: - if d.HasChanges( - "username", "password", "server_name", "port", "database_name", "secrets_manager_access_role_arn", - "secrets_manager_arn") { - if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.SybaseSettings = &dms.SybaseSettings{ - DatabaseName: aws.String(d.Get("database_name").(string)), - SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), - SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), + // Update connection info in top-level namespace as well + expandTopLevelConnectionInfoModify(d, input) } - } else { - input.SybaseSettings = &dms.SybaseSettings{ - Username: aws.String(d.Get("username").(string)), - Password: 
aws.String(d.Get("password").(string)), - ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get("port").(int))), - DatabaseName: aws.String(d.Get("database_name").(string)), - } - input.EngineName = aws.String(engineName) // Must be included (should be 'postgres') - - // Update connection info in top-level namespace as well - expandTopLevelConnectionInfoModify(d, input) } - } - case engineNameDB2, engineNameDB2zOS: - if d.HasChanges( - "username", "password", "server_name", "port", "database_name", "secrets_manager_access_role_arn", - "secrets_manager_arn") { - if _, ok := d.GetOk("secrets_manager_arn"); ok { - input.IBMDb2Settings = &dms.IBMDb2Settings{ - DatabaseName: aws.String(d.Get("database_name").(string)), - SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), - SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), + case engineNameSybase: + if d.HasChanges( + "username", "password", "server_name", "port", "database_name", "secrets_manager_access_role_arn", + "secrets_manager_arn") { + if _, ok := d.GetOk("secrets_manager_arn"); ok { + input.SybaseSettings = &dms.SybaseSettings{ + DatabaseName: aws.String(d.Get("database_name").(string)), + SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), + SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), + } + } else { + input.SybaseSettings = &dms.SybaseSettings{ + Username: aws.String(d.Get("username").(string)), + Password: aws.String(d.Get("password").(string)), + ServerName: aws.String(d.Get("server_name").(string)), + Port: aws.Int64(int64(d.Get("port").(int))), + DatabaseName: aws.String(d.Get("database_name").(string)), + } + input.EngineName = aws.String(engineName) // Must be included (should be 'postgres') + + // Update connection info in top-level namespace as well + expandTopLevelConnectionInfoModify(d, input) } - } else { - input.IBMDb2Settings = 
&dms.IBMDb2Settings{ - Username: aws.String(d.Get("username").(string)), - Password: aws.String(d.Get("password").(string)), - ServerName: aws.String(d.Get("server_name").(string)), - Port: aws.Int64(int64(d.Get("port").(int))), - DatabaseName: aws.String(d.Get("database_name").(string)), + } + case engineNameDB2, engineNameDB2zOS: + if d.HasChanges( + "username", "password", "server_name", "port", "database_name", "secrets_manager_access_role_arn", + "secrets_manager_arn") { + if _, ok := d.GetOk("secrets_manager_arn"); ok { + input.IBMDb2Settings = &dms.IBMDb2Settings{ + DatabaseName: aws.String(d.Get("database_name").(string)), + SecretsManagerAccessRoleArn: aws.String(d.Get("secrets_manager_access_role_arn").(string)), + SecretsManagerSecretId: aws.String(d.Get("secrets_manager_arn").(string)), + } + } else { + input.IBMDb2Settings = &dms.IBMDb2Settings{ + Username: aws.String(d.Get("username").(string)), + Password: aws.String(d.Get("password").(string)), + ServerName: aws.String(d.Get("server_name").(string)), + Port: aws.Int64(int64(d.Get("port").(int))), + DatabaseName: aws.String(d.Get("database_name").(string)), + } + input.EngineName = aws.String(engineName) // Must be included (should be 'db2') + + // Update connection info in top-level namespace as well + expandTopLevelConnectionInfoModify(d, input) } - input.EngineName = aws.String(engineName) // Must be included (should be 'db2') + } + case engineNameS3: + if d.HasChanges("s3_settings") { + input.S3Settings = expandS3Settings(d.Get("s3_settings").([]interface{})[0].(map[string]interface{})) + input.EngineName = aws.String(engineName) + } + default: + if d.HasChange("database_name") { + input.DatabaseName = aws.String(d.Get("database_name").(string)) + } - // Update connection info in top-level namespace as well - expandTopLevelConnectionInfoModify(d, input) + if d.HasChange("password") { + input.Password = aws.String(d.Get("password").(string)) } - } - case engineNameS3: - if 
d.HasChanges("s3_settings") { - input.S3Settings = expandS3Settings(d.Get("s3_settings").([]interface{})[0].(map[string]interface{})) - input.EngineName = aws.String(engineName) - } - default: - if d.HasChange("database_name") { - input.DatabaseName = aws.String(d.Get("database_name").(string)) - } - if d.HasChange("password") { - input.Password = aws.String(d.Get("password").(string)) - } + if d.HasChange("port") { + input.Port = aws.Int64(int64(d.Get("port").(int))) + } - if d.HasChange("port") { - input.Port = aws.Int64(int64(d.Get("port").(int))) - } + if d.HasChange("server_name") { + input.ServerName = aws.String(d.Get("server_name").(string)) + } - if d.HasChange("server_name") { - input.ServerName = aws.String(d.Get("server_name").(string)) + if d.HasChange("username") { + input.Username = aws.String(d.Get("username").(string)) + } } - if d.HasChange("username") { - input.Username = aws.String(d.Get("username").(string)) - } - } + _, err := conn.ModifyEndpointWithContext(ctx, input) - var tasks []*dms.ReplicationTask - if v, ok := d.GetOk("pause_replication_tasks"); ok && v.(bool) { - var err error - tasks, err = stopEndpointReplicationTasks(ctx, conn, d.Get("endpoint_arn").(string)) if err != nil { - return sdkdiag.AppendErrorf(diags, "pausing replication tasks before updating DMS Endpoint (%s): %s", d.Id(), err) + return sdkdiag.AppendErrorf(diags, "updating DMS Endpoint (%s): %s", d.Id(), err) } } - _, err := conn.ModifyEndpointWithContext(ctx, input) - if err != nil { - return sdkdiag.AppendErrorf(diags, "updating DMS Endpoint (%s): %s", d.Id(), err) - } - - if v, ok := d.GetOk("pause_replication_tasks"); ok && v.(bool) && len(tasks) > 0 { - if err := startEndpointReplicationTasks(ctx, conn, d.Get("endpoint_arn").(string), tasks); err != nil { + if pauseTasks && len(tasks) > 0 { + if err := startEndpointReplicationTasks(ctx, conn, endpointARN, tasks); err != nil { return sdkdiag.AppendErrorf(diags, "starting replication tasks after updating DMS Endpoint 
(%s): %s", d.Id(), err) } } diff --git a/internal/service/dms/endpoint_test.go b/internal/service/dms/endpoint_test.go index a3b9ede796f..f661333bf5f 100644 --- a/internal/service/dms/endpoint_test.go +++ b/internal/service/dms/endpoint_test.go @@ -2219,24 +2219,20 @@ func TestAccDMSEndpoint_pauseReplicationTasks(t *testing.T) { CheckDestroy: testAccCheckReplicationTaskDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccEndpointConfig_pauseReplicationTasks(rName, "source", "target"), + Config: testAccEndpointConfig_pauseReplicationTasks(rName, false), Check: resource.ComposeTestCheckFunc( testAccCheckEndpointExists(ctx, endpointNameSource), testAccCheckEndpointExists(ctx, endpointNameTarget), testAccCheckReplicationTaskExists(ctx, replicationTaskName), - resource.TestCheckResourceAttr(endpointNameSource, "endpoint_type", "source"), - resource.TestCheckResourceAttr(endpointNameTarget, "endpoint_type", "target"), resource.TestCheckResourceAttr(replicationTaskName, "status", "running"), ), }, { - Config: testAccEndpointConfig_pauseReplicationTasks(rName, "target", "source"), + Config: testAccEndpointConfig_pauseReplicationTasks(rName, true), Check: resource.ComposeTestCheckFunc( testAccCheckEndpointExists(ctx, endpointNameSource), testAccCheckEndpointExists(ctx, endpointNameTarget), testAccCheckReplicationTaskExists(ctx, replicationTaskName), - resource.TestCheckResourceAttr(endpointNameSource, "endpoint_type", "target"), - resource.TestCheckResourceAttr(endpointNameTarget, "endpoint_type", "source"), resource.TestCheckResourceAttr(replicationTaskName, "status", "running"), ), }, @@ -4683,15 +4679,15 @@ resource "aws_kms_key" "test" { `, rName)) } -func testAccEndpointConfig_pauseReplicationTasks(rName, type1, type2 string) string { +func testAccEndpointConfig_pauseReplicationTasks(rName string, pause bool) string { return acctest.ConfigCompose(testAccRDSClustersConfig_base(rName), fmt.Sprintf(` resource "aws_dms_endpoint" "source" { database_name = "tftest" 
endpoint_id = "%[1]s-source" - endpoint_type = %[2]q + endpoint_type = "source" engine_name = "aurora" password = "mustbeeightcharaters" - pause_replication_tasks = true + pause_replication_tasks = %[2]t port = 3306 server_name = aws_rds_cluster.source.endpoint username = "tftest" @@ -4700,10 +4696,10 @@ resource "aws_dms_endpoint" "source" { resource "aws_dms_endpoint" "target" { database_name = "tftest" endpoint_id = "%[1]s-target" - endpoint_type = %[3]q + endpoint_type = "target" engine_name = "aurora" password = "mustbeeightcharaters" - pause_replication_tasks = true + pause_replication_tasks = %[2]t port = 3306 server_name = aws_rds_cluster.target.endpoint username = "tftest" @@ -4744,5 +4740,5 @@ resource "aws_dms_replication_task" "test" { depends_on = [aws_rds_cluster_instance.source, aws_rds_cluster_instance.target] } -`, rName, type1, type2)) +`, rName, pause)) } From 7e834638830c81f28774b2a1383307358549cf09 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 19 Dec 2023 12:25:37 -0500 Subject: [PATCH 339/438] 'HostedZoneIDForRegion' -> 'hostedZoneIDForRegion'. 
--- internal/service/s3/bucket_data_source.go | 2 +- internal/service/s3/exports_test.go | 2 + internal/service/s3/hosted_zones.go | 4 +- internal/service/s3/validate.go | 58 ----------------------- 4 files changed, 5 insertions(+), 61 deletions(-) delete mode 100644 internal/service/s3/validate.go diff --git a/internal/service/s3/bucket_data_source.go b/internal/service/s3/bucket_data_source.go index 0b71228f576..f61cd423eb9 100644 --- a/internal/service/s3/bucket_data_source.go +++ b/internal/service/s3/bucket_data_source.go @@ -105,7 +105,7 @@ func dataSourceBucketRead(ctx context.Context, d *schema.ResourceData, meta inte } else { log.Printf("[WARN] BucketRegionalDomainName: %s", err) } - if hostedZoneID, err := HostedZoneIDForRegion(region); err == nil { + if hostedZoneID, err := hostedZoneIDForRegion(region); err == nil { d.Set("hosted_zone_id", hostedZoneID) } else { log.Printf("[WARN] HostedZoneIDForRegion: %s", err) diff --git a/internal/service/s3/exports_test.go b/internal/service/s3/exports_test.go index 42495f702a1..7f811f45450 100644 --- a/internal/service/s3/exports_test.go +++ b/internal/service/s3/exports_test.go @@ -30,8 +30,10 @@ var ( FindPublicAccessBlockConfiguration = findPublicAccessBlockConfiguration FindReplicationConfiguration = findReplicationConfiguration FindServerSideEncryptionConfiguration = findServerSideEncryptionConfiguration + HostedZoneIDForRegion = hostedZoneIDForRegion IsDirectoryBucket = isDirectoryBucket SDKv1CompatibleCleanKey = sdkv1CompatibleCleanKey + ValidBucketName = validBucketName ErrCodeBucketAlreadyExists = errCodeBucketAlreadyExists ErrCodeBucketAlreadyOwnedByYou = errCodeBucketAlreadyOwnedByYou diff --git a/internal/service/s3/hosted_zones.go b/internal/service/s3/hosted_zones.go index c5fc5852986..2068cd5c72d 100644 --- a/internal/service/s3/hosted_zones.go +++ b/internal/service/s3/hosted_zones.go @@ -45,9 +45,9 @@ var hostedZoneIDsMap = map[string]string{ names.USWest2RegionID: "Z3BJ6K6RIION7M", } -// Returns 
the hosted zone ID for an S3 website endpoint Region. +// hostedZoneIDForRegion returns the Route 53 hosted zone ID for an S3 website endpoint Region. // This can be used as input to the aws_route53_record resource's zone_id argument. -func HostedZoneIDForRegion(region string) (string, error) { +func hostedZoneIDForRegion(region string) (string, error) { if v, ok := hostedZoneIDsMap[region]; ok { return v, nil } diff --git a/internal/service/s3/validate.go b/internal/service/s3/validate.go deleted file mode 100644 index 9eab70125f8..00000000000 --- a/internal/service/s3/validate.go +++ /dev/null @@ -1,58 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package s3 - -import ( - "fmt" - "strings" - "time" - - "github.com/YakDriver/regexache" - "github.com/hashicorp/terraform-provider-aws/names" -) - -// ValidBucketName validates any S3 bucket name that is not inside the us-east-1 region. -// Buckets outside of this region have to be DNS-compliant. After the same restrictions are -// applied to buckets in the us-east-1 region, this function can be refactored as a SchemaValidateFunc -func ValidBucketName(value string, region string) error { - if region != names.USEast1RegionID { - if (len(value) < 3) || (len(value) > 63) { - return fmt.Errorf("%q must contain from 3 to 63 characters", value) - } - if !regexache.MustCompile(`^[0-9a-z-.]+$`).MatchString(value) { - return fmt.Errorf("only lowercase alphanumeric characters and hyphens allowed in %q", value) - } - if regexache.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`).MatchString(value) { - return fmt.Errorf("%q must not be formatted as an IP address", value) - } - if strings.HasPrefix(value, `.`) { - return fmt.Errorf("%q cannot start with a period", value) - } - if strings.HasSuffix(value, `.`) { - return fmt.Errorf("%q cannot end with a period", value) - } - if strings.Contains(value, `..`) { - return fmt.Errorf("%q can be only one period between labels", value) - } - } else { - if 
len(value) > 255 { - return fmt.Errorf("%q must contain less than 256 characters", value) - } - if !regexache.MustCompile(`^[0-9A-Za-z_.-]+$`).MatchString(value) { - return fmt.Errorf("only alphanumeric characters, hyphens, periods, and underscores allowed in %q", value) - } - } - return nil -} - -func validBucketLifecycleTimestamp(v interface{}, k string) (ws []string, errors []error) { - value := v.(string) - _, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", value)) - if err != nil { - errors = append(errors, fmt.Errorf( - "%q cannot be parsed as RFC3339 Timestamp Format", value)) - } - - return -} From d47823c9f7021d3d8c1cc57cd28ef2c0737bcda1 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Tue, 19 Dec 2023 13:46:42 -0500 Subject: [PATCH 340/438] r/aws_batch_job_definition: add enums missing from aws sdk --- internal/service/batch/eks_properties.go | 29 ++++++++++++++++++++++++ internal/service/batch/job_definition.go | 13 ++++------- 2 files changed, 34 insertions(+), 8 deletions(-) diff --git a/internal/service/batch/eks_properties.go b/internal/service/batch/eks_properties.go index fd26900cd1a..187be96b77a 100644 --- a/internal/service/batch/eks_properties.go +++ b/internal/service/batch/eks_properties.go @@ -10,6 +10,34 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/flex" ) +const ( + ImagePullPolicyAlways = "Always" + ImagePullPolicyIfNotPresent = "IfNotPresent" + ImagePullPolicyNever = "Never" +) + +func ImagePullPolicy_Values() []string { + return []string{ + ImagePullPolicyAlways, + ImagePullPolicyIfNotPresent, + ImagePullPolicyNever, + } +} + +const ( + DNSPolicyDefault = "Default" + DNSPolicyClusterFirst = "ClusterFirst" + DNSPolicyClusterFirstWithHostNet = "ClusterFirstWithHostNet" +) + +func DNSPolicy_Values() []string { + return []string{ + DNSPolicyDefault, + DNSPolicyClusterFirst, + DNSPolicyClusterFirstWithHostNet, + } +} + func expandEKSPodProperties(podPropsMap map[string]interface{}) *batch.EksPodProperties { 
podProps := &batch.EksPodProperties{} @@ -232,6 +260,7 @@ func flattenEKSPodProperties(podProperties *batch.EksPodProperties) (tfList []in tfList = append(tfList, tfMap) return tfList } + func flattenEKSContainers(containers []*batch.EksContainer) (tfList []interface{}) { for _, container := range containers { tfMap := map[string]interface{}{} diff --git a/internal/service/batch/job_definition.go b/internal/service/batch/job_definition.go index ab2fce60460..a3c7afb5726 100644 --- a/internal/service/batch/job_definition.go +++ b/internal/service/batch/job_definition.go @@ -134,13 +134,9 @@ func ResourceJobDefinition() *schema.Resource { Required: true, }, "image_pull_policy": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice([]string{ - "Always", - "IfNotPresent", - "Never", - }, false), + Type: schema.TypeString, + Optional: true, + ValidateFunc: validation.StringInSlice(ImagePullPolicy_Values(), false), }, "name": { Type: schema.TypeString, @@ -220,7 +216,7 @@ func ResourceJobDefinition() *schema.Resource { "dns_policy": { Type: schema.TypeString, Optional: true, - ValidateFunc: validation.StringInSlice([]string{"Default", "ClusterFirst", "ClusterFirstWithHostNet"}, false), + ValidateFunc: validation.StringInSlice(DNSPolicy_Values(), false), }, "host_network": { Type: schema.TypeBool, @@ -460,6 +456,7 @@ func resourceJobDefinitionCreate(ctx context.Context, d *schema.ResourceData, me input.ContainerProperties = props } } + if v, ok := d.GetOk("eks_properties"); ok && len(v.([]interface{})) > 0 { eksProps := v.([]interface{})[0].(map[string]interface{}) if podProps, ok := eksProps["pod_properties"].([]interface{}); ok && len(podProps) > 0 { From e8a470fbde29811551cad27baf2218f924cf394b Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Tue, 19 Dec 2023 14:01:47 -0500 Subject: [PATCH 341/438] r/aws_batch_job_definition(doc): tidy headings, eks_properties descriptions --- .../docs/r/batch_job_definition.html.markdown | 34 
+++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/website/docs/r/batch_job_definition.html.markdown b/website/docs/r/batch_job_definition.html.markdown index f3f5b2e657a..512adbf6646 100644 --- a/website/docs/r/batch_job_definition.html.markdown +++ b/website/docs/r/batch_job_definition.html.markdown @@ -213,63 +213,63 @@ The following arguments are optional: * `tags` - (Optional) Key-value map of resource tags. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. * `timeout` - (Optional) Specifies the timeout for jobs so that if a job runs longer, AWS Batch terminates the job. Maximum number of `timeout` is `1`. Defined below. -### eks_properties +### `eks_properties` -* `pod_properties` - The properties for the Kubernetes pod resources of a job. +* `pod_properties` - The properties for the Kubernetes pod resources of a job. See [`pod_properties`](#pod_properties) below. -### pod_properties +### `pod_properties` -* `containers` - The properties of the container that's used on the Amazon EKS pod. See [containers](#containers) -* `dns_policy` - (Optional) The DNS policy for the pod. The default value is `ClusterFirst`. If the hostNetwork parameter is not specified, the default is `ClusterFirstWithHostNet`. ClusterFirst indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see Pod's DNS policy in the Kubernetes documentation. -* `host_network` - (Optional) Indicates if the pod uses the hosts' network IP address. The default value is `true`. Setting this to false enables the Kubernetes pod networking model. 
Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections. +* `containers` - The properties of the container that's used on the Amazon EKS pod. See [containers](#containers) below. +* `dns_policy` - (Optional) The DNS policy for the pod. The default value is `ClusterFirst`. If the `host_network` argument is not specified, the default is `ClusterFirstWithHostNet`. `ClusterFirst` indicates that any DNS query that does not match the configured cluster domain suffix is forwarded to the upstream nameserver inherited from the node. For more information, see Pod's DNS policy in the Kubernetes documentation. +* `host_network` - (Optional) Indicates if the pod uses the hosts' network IP address. The default value is `true`. Setting this to `false` enables the Kubernetes pod networking model. Most AWS Batch workloads are egress-only and don't require the overhead of IP allocation for each pod for incoming connections. * `metadata` - (Optional) Metadata about the Kubernetes pod. * `service_account_name` - (Optional) The name of the service account that's used to run the pod. * `volumes` - (Optional) Specifies the volumes for a job definition that uses Amazon EKS resources. AWS Batch supports [emptyDir](#eks_empty_dir), [hostPath](#eks_host_path), and [secret](#eks_secret) volume types. -### containers +### `containers` * `image` - The Docker image used to start the container. * `args` - An array of arguments to the entrypoint. If this isn't specified, the CMD of the container image is used. This corresponds to the args member in the Entrypoint portion of the Pod in Kubernetes. Environment variable references are expanded using the container's environment. * `command` - The entrypoint for the container. This isn't run within a shell. If this isn't specified, the ENTRYPOINT of the container image is used. Environment variable references are expanded using the container's environment. 
-* `env` - The environment variables to pass to a container. See [EKS Environment](#eks_environment) +* `env` - The environment variables to pass to a container. See [EKS Environment](#eks_environment) below. * `image_pull_policy` - The image pull policy for the container. Supported values are `Always`, `IfNotPresent`, and `Never`. * `name` - The name of the container. If the name isn't specified, the default name "Default" is used. Each container in a pod must have a unique name. -* `resources` - The type and amount of resources to assign to a container. The supported resources include `memory`, `cpu`, and `nvidia.com/gpu` -* `security_context` - The security context for a job +* `resources` - The type and amount of resources to assign to a container. The supported resources include `memory`, `cpu`, and `nvidia.com/gpu`. +* `security_context` - The security context for a job. * `volume_mounts` - The volume mounts for the container. -### eks_environment +### `eks_environment` * `name` - The name of the environment variable. * `value` - The value of the environment variable. -### eks_empty_dir +### `eks_empty_dir` * `medium` - (Optional) The medium to store the volume. The default value is an empty string, which uses the storage of the node. * `size_limit` - The maximum size of the volume. By default, there's no maximum size defined. -### eks_host_path +### `eks_host_path` * `path` - The path of the file or directory on the host to mount into containers on the pod. -### eks_secret +### `eks_secret` * `secret_name` - The name of the secret. The name must be allowed as a DNS subdomain name. * `optional` - (Optional) Specifies whether the secret or the secret's keys must be defined. -### retry_strategy +### `retry_strategy` * `attempts` - (Optional) The number of times to move a job to the `RUNNABLE` status. You may specify between `1` and `10` attempts. 
* `evaluate_on_exit` - (Optional) The [evaluate on exit](#evaluate_on_exit) conditions under which the job should be retried or failed. If this parameter is specified, then the `attempts` parameter must also be specified. You may specify up to 5 configuration blocks. -#### evaluate_on_exit +#### `evaluate_on_exit` * `action` - (Required) Specifies the action to take if all of the specified conditions are met. The values are not case sensitive. Valid values: `RETRY`, `EXIT`. * `on_exit_code` - (Optional) A glob pattern to match against the decimal representation of the exit code returned for a job. * `on_reason` - (Optional) A glob pattern to match against the reason returned for a job. * `on_status_reason` - (Optional) A glob pattern to match against the status reason returned for a job. -### timeout +### `timeout` * `attempt_duration_seconds` - (Optional) The time duration in seconds after which AWS Batch terminates your jobs if they have not finished. The minimum value for the timeout is `60` seconds. From c612b298cc9c22f296ae588328982768775dc340 Mon Sep 17 00:00:00 2001 From: drewmullen Date: Tue, 19 Dec 2023 14:46:21 -0500 Subject: [PATCH 342/438] Update website/docs/d/batch_compute_environment.html.markdown Co-authored-by: Jared Baker --- website/docs/d/batch_compute_environment.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/d/batch_compute_environment.html.markdown b/website/docs/d/batch_compute_environment.html.markdown index 5d9a2321862..b7dae8cd3d4 100644 --- a/website/docs/d/batch_compute_environment.html.markdown +++ b/website/docs/d/batch_compute_environment.html.markdown @@ -36,5 +36,5 @@ This data source exports the following attributes in addition to the arguments a * `status` - Current status of the compute environment (for example, `CREATING` or `VALID`). * `status_reason` - Short, human-readable string to provide additional details about the current status of the compute environment. 
* `state` - State of the compute environment (for example, `ENABLED` or `DISABLED`). If the state is `ENABLED`, then the compute environment accepts jobs from a queue and can scale out automatically based on queues. -* `update_policy` - (Optional) Specifies the infrastructure update policy for the compute environment. +* `update_policy` - Specifies the infrastructure update policy for the compute environment. * `tags` - Key-value map of resource tags From f72c287d0fc7df619e681482292a20d25f06868d Mon Sep 17 00:00:00 2001 From: drewmullen Date: Tue, 19 Dec 2023 14:46:36 -0500 Subject: [PATCH 343/438] Update internal/service/batch/compute_environment.go Co-authored-by: Jared Baker --- internal/service/batch/compute_environment.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/service/batch/compute_environment.go b/internal/service/batch/compute_environment.go index 9148b413931..4621ad7be69 100644 --- a/internal/service/batch/compute_environment.go +++ b/internal/service/batch/compute_environment.go @@ -319,13 +319,13 @@ func resourceComputeEnvironmentCreate(ctx context.Context, d *schema.ResourceDat // UpdatePolicy is not possible to set with CreateComputeEnvironment if v, ok := d.GetOk("update_policy"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - inputeUpdateOnCreate := &batch.UpdateComputeEnvironmentInput{ + inputUpdateOnCreate := &batch.UpdateComputeEnvironmentInput{ ComputeEnvironment: aws.String(d.Id()), UpdatePolicy: expandComputeEnvironmentUpdatePolicy(v.([]interface{})), } - log.Printf("[DEBUG] Creating Batch Compute Environment extra arguments: %s", input) + log.Printf("[DEBUG] Creating Batch Compute Environment extra arguments: %s", inputUpdateOnCreate) - if _, err := conn.UpdateComputeEnvironmentWithContext(ctx, inputeUpdateOnCreate); err != nil { + if _, err := conn.UpdateComputeEnvironmentWithContext(ctx, inputUpdateOnCreate); err != nil { return sdkdiag.AppendErrorf(diags, "Create Batch Compute 
Environment extra arguments through UpdateComputeEnvironment (%s): %s", d.Id(), err) } From 66243271326f0877b8259defa9083832307b3d42 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 19 Dec 2023 15:10:16 -0500 Subject: [PATCH 344/438] Add 'names.DNSSuffixForPartition'. --- names/names.go | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/names/names.go b/names/names.go index 2195941e82b..a184544d952 100644 --- a/names/names.go +++ b/names/names.go @@ -100,6 +100,10 @@ const ( // See https://github.com/hashicorp/aws-sdk-go-base/issues/649. const ( ChinaPartitionID = "aws-cn" // AWS China partition. + ISOPartitionID = "aws-iso" // AWS ISO (US) partition. + ISOBPartitionID = "aws-iso-b" // AWS ISOB (US) partition. + ISOEPartitionID = "aws-iso-e" // AWS ISOE (Europe) partition. + ISOFPartitionID = "aws-iso-f" // AWS ISOF partition. StandardPartitionID = "aws" // AWS Standard partition. USGovCloudPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. ) @@ -146,6 +150,23 @@ const ( USGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West). ) +func DNSSuffixForPartition(partition string) string { + switch partition { + case ChinaPartitionID: + return "amazonaws.com.cn" + case ISOPartitionID: + return "c2s.ic.gov" + case ISOBPartitionID: + return "sc2s.sgov.gov" + case ISOEPartitionID: + return "cloud.adc-e.uk" + case ISOFPartitionID: + return "csp.hci.ic.gov" + default: + return "amazonaws.com" + } +} + // Type ServiceDatum corresponds closely to columns in `names_data.csv` and are // described in detail in README.md. type ServiceDatum struct { From 3317c34e158fce9a2173ec582fe8d011407435c7 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 19 Dec 2023 15:15:28 -0500 Subject: [PATCH 345/438] Add 'names.PartitionForRegion'. 
--- names/names.go | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/names/names.go b/names/names.go index a184544d952..27a9ab96c31 100644 --- a/names/names.go +++ b/names/names.go @@ -148,6 +148,13 @@ const ( // AWS GovCloud (US) partition's regions. USGovEast1RegionID = "us-gov-east-1" // AWS GovCloud (US-East). USGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West). + + // AWS ISO (US) partition's regions. + USISOEast1RegionID = "us-iso-east-1" // US ISO East. + USISOWest1RegionID = "us-iso-west-1" // US ISO WEST. + + // AWS ISOB (US) partition's regions. + USISOBEast1RegionID = "us-isob-east-1" // US ISOB East (Ohio). ) func DNSSuffixForPartition(partition string) string { @@ -167,6 +174,21 @@ func DNSSuffixForPartition(partition string) string { } } +func PartitionForRegion(region string) string { + switch region { + case CNNorth1RegionID, CNNorthwest1RegionID: + return ChinaPartitionID + case USISOEast1RegionID, USISOWest1RegionID: + return ISOPartitionID + case USISOBEast1RegionID: + return ISOBPartitionID + case USGovEast1RegionID, USGovWest1RegionID: + return USGovCloudPartitionID + default: + return StandardPartitionID + } +} + // Type ServiceDatum corresponds closely to columns in `names_data.csv` and are // described in detail in README.md. type ServiceDatum struct { From 79dcd6f7c489c6f90f48f27ce107781c3590c632 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 19 Dec 2023 15:20:25 -0500 Subject: [PATCH 346/438] 'conns.ReverseDNS' -> 'names.ReverseDNS'. 
--- internal/acctest/acctest.go | 3 +- internal/conns/config.go | 2 +- internal/conns/conns_test.go | 55 ------------------------------------ names/names.go | 11 ++++++++ names/names_test.go | 47 ++++++++++++++++++++++++++++++ 5 files changed, 61 insertions(+), 57 deletions(-) delete mode 100644 internal/conns/conns_test.go diff --git a/internal/acctest/acctest.go b/internal/acctest/acctest.go index 912d69043a8..57e0932a538 100644 --- a/internal/acctest/acctest.go +++ b/internal/acctest/acctest.go @@ -53,6 +53,7 @@ import ( tforganizations "github.com/hashicorp/terraform-provider-aws/internal/service/organizations" tfsts "github.com/hashicorp/terraform-provider-aws/internal/service/sts" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + "github.com/hashicorp/terraform-provider-aws/names" "github.com/jmespath/go-jmespath" "github.com/mitchellh/mapstructure" ) @@ -824,7 +825,7 @@ func PartitionDNSSuffix() string { func PartitionReverseDNSPrefix() string { if partition, ok := endpoints.PartitionForRegion(endpoints.DefaultPartitions(), Region()); ok { - return conns.ReverseDNS(partition.DNSSuffix()) + return names.ReverseDNS(partition.DNSSuffix()) } return "com.amazonaws" diff --git a/internal/conns/config.go b/internal/conns/config.go index 75b949907c2..743cba1d2ef 100644 --- a/internal/conns/config.go +++ b/internal/conns/config.go @@ -199,7 +199,7 @@ func (c *Config) ConfigureProvider(ctx context.Context, client *AWSClient) (*AWS client.IgnoreTagsConfig = c.IgnoreTagsConfig client.Partition = partition client.Region = c.Region - client.ReverseDNSPrefix = ReverseDNS(DNSSuffix) + client.ReverseDNSPrefix = names.ReverseDNS(DNSSuffix) client.SetHTTPClient(sess.Config.HTTPClient) // Must be called while client.Session is nil. 
client.Session = sess client.TerraformVersion = c.TerraformVersion diff --git a/internal/conns/conns_test.go b/internal/conns/conns_test.go deleted file mode 100644 index 17c962cc58f..00000000000 --- a/internal/conns/conns_test.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: MPL-2.0 - -package conns - -import ( - "testing" -) - -func TestReverseDNS(t *testing.T) { - t.Parallel() - - testCases := []struct { - name string - input string - expected string - }{ - { - name: "empty", - input: "", - expected: "", - }, - { - name: "amazonaws.com", - input: "amazonaws.com", - expected: "com.amazonaws", - }, - { - name: "amazonaws.com.cn", - input: "amazonaws.com.cn", - expected: "cn.com.amazonaws", - }, - { - name: "sc2s.sgov.gov", - input: "sc2s.sgov.gov", - expected: "gov.sgov.sc2s", - }, - { - name: "c2s.ic.gov", - input: "c2s.ic.gov", - expected: "gov.ic.c2s", - }, - } - - for _, testCase := range testCases { - testCase := testCase - t.Run(testCase.name, func(t *testing.T) { - t.Parallel() - - if got, want := ReverseDNS(testCase.input), testCase.expected; got != want { - t.Errorf("got: %s, expected: %s", got, want) - } - }) - } -} diff --git a/names/names.go b/names/names.go index 27a9ab96c31..4877b40dc7b 100644 --- a/names/names.go +++ b/names/names.go @@ -189,6 +189,17 @@ func PartitionForRegion(region string) string { } } +// ReverseDNS switches a DNS hostname to reverse DNS and vice-versa. +func ReverseDNS(hostname string) string { + parts := strings.Split(hostname, ".") + + for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 { + parts[i], parts[j] = parts[j], parts[i] + } + + return strings.Join(parts, ".") +} + // Type ServiceDatum corresponds closely to columns in `names_data.csv` and are // described in detail in README.md. 
type ServiceDatum struct { diff --git a/names/names_test.go b/names/names_test.go index b7f96f593fd..e6835c16f5f 100644 --- a/names/names_test.go +++ b/names/names_test.go @@ -11,6 +11,53 @@ import ( "testing" ) +func TestReverseDNS(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + input string + expected string + }{ + { + name: "empty", + input: "", + expected: "", + }, + { + name: "amazonaws.com", + input: "amazonaws.com", + expected: "com.amazonaws", + }, + { + name: "amazonaws.com.cn", + input: "amazonaws.com.cn", + expected: "cn.com.amazonaws", + }, + { + name: "sc2s.sgov.gov", + input: "sc2s.sgov.gov", + expected: "gov.sgov.sc2s", + }, + { + name: "c2s.ic.gov", + input: "c2s.ic.gov", + expected: "gov.ic.c2s", + }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + if got, want := ReverseDNS(testCase.input), testCase.expected; got != want { + t.Errorf("got: %s, expected: %s", got, want) + } + }) + } +} + func TestProviderPackageForAlias(t *testing.T) { t.Parallel() From 5a726a164cf4c8568e94e8b51f369e9902296dfb Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 19 Dec 2023 15:51:40 -0500 Subject: [PATCH 347/438] r/aws_s3_bucket: Tidy up domain name functions. 
--- internal/service/s3/bucket.go | 809 ++++++++---------- internal/service/s3/bucket_data_source.go | 12 +- internal/service/s3/bucket_test.go | 47 +- .../s3/bucket_website_configuration.go | 10 +- internal/service/s3/exports_test.go | 2 + 5 files changed, 381 insertions(+), 499 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 18b5faa7fbb..58768bed0f8 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -15,16 +15,13 @@ import ( "strings" "time" - aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" - s3_sdkv2 "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/arn" - "github.com/aws/aws-sdk-go/aws/endpoints" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3manager" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" - tfawserr_sdkv2 "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" + "github.com/YakDriver/regexache" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" + "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/s3" + "github.com/aws/aws-sdk-go-v2/service/s3/types" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" @@ -33,6 +30,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/internal/create" + "github.com/hashicorp/terraform-provider-aws/internal/enum" "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" @@ -40,11 +38,10 @@ import ( 
"github.com/hashicorp/terraform-provider-aws/internal/tfresource" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" + "golang.org/x/exp/slices" ) const ( - resNameBucket = "Bucket" - // General timeout for S3 bucket changes to propagate. // See https://docs.aws.amazon.com/AmazonS3/latest/userguide/Welcome.html#ConsistencyModel. s3BucketPropagationTimeout = 2 * time.Minute // nosemgrep:ci.s3-in-const-name, ci.s3-in-var-name @@ -72,11 +69,11 @@ func ResourceBucket() *schema.Resource { Schema: map[string]*schema.Schema{ "acceleration_status": { - Type: schema.TypeString, - Optional: true, - Computed: true, - Deprecated: "Use the aws_s3_bucket_accelerate_configuration resource instead", - ValidateFunc: validation.StringInSlice(s3.BucketAccelerateStatus_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + Deprecated: "Use the aws_s3_bucket_accelerate_configuration resource instead", + ValidateDiagFunc: enum.Validate[types.BucketAccelerateStatus](), }, "acl": { Type: schema.TypeString, @@ -175,18 +172,18 @@ func ResourceBucket() *schema.Resource { Required: true, Set: schema.HashString, Elem: &schema.Schema{ - Type: schema.TypeString, - ValidateFunc: validation.StringInSlice(s3.Permission_Values(), false), + Type: schema.TypeString, + ValidateDiagFunc: enum.Validate[types.Permission](), }, }, "type": { Type: schema.TypeString, Required: true, // TypeAmazonCustomerByEmail is not currently supported - ValidateFunc: validation.StringInSlice([]string{ - s3.TypeCanonicalUser, - s3.TypeGroup, - }, false), + ValidateFunc: validation.StringInSlice(enum.Slice( + types.TypeCanonicalUser, + types.TypeGroup, + ), false), }, "uri": { Type: schema.TypeString, @@ -268,9 +265,9 @@ func ResourceBucket() *schema.Resource { ValidateFunc: validation.IntAtLeast(0), }, "storage_class": { - Type: schema.TypeString, - Required: true, - ValidateFunc: 
validation.StringInSlice(s3.TransitionStorageClass_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.TransitionStorageClass](), }, }, }, @@ -296,9 +293,9 @@ func ResourceBucket() *schema.Resource { ValidateFunc: validation.IntAtLeast(0), }, "storage_class": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.TransitionStorageClass_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.TransitionStorageClass](), }, }, }, @@ -334,12 +331,12 @@ func ResourceBucket() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "object_lock_enabled": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ConflictsWith: []string{"object_lock_enabled"}, - ValidateFunc: validation.StringInSlice(s3.ObjectLockEnabled_Values(), false), - Deprecated: "Use the top-level parameter object_lock_enabled instead", + Type: schema.TypeString, + Optional: true, + ForceNew: true, + ConflictsWith: []string{"object_lock_enabled"}, + ValidateDiagFunc: enum.Validate[types.ObjectLockEnabled](), + Deprecated: "Use the top-level parameter object_lock_enabled instead", }, "rule": { Type: schema.TypeList, @@ -361,9 +358,9 @@ func ResourceBucket() *schema.Resource { ValidateFunc: validation.IntAtLeast(1), }, "mode": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.ObjectLockRetentionMode_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.ObjectLockRetentionMode](), }, "years": { Type: schema.TypeInt, @@ -423,7 +420,7 @@ func ResourceBucket() *schema.Resource { "delete_marker_replication_status": { Type: schema.TypeString, Optional: true, - ValidateFunc: validation.StringInSlice([]string{s3.DeleteMarkerReplicationStatusEnabled}, false), + ValidateFunc: validation.StringInSlice(enum.Slice(types.DeleteMarkerReplicationStatusEnabled), false), 
}, "destination": { Type: schema.TypeList, @@ -440,9 +437,9 @@ func ResourceBucket() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "owner": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.OwnerOverride_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.OwnerOverride](), }, }, }, @@ -470,10 +467,10 @@ func ResourceBucket() *schema.Resource { ValidateFunc: validation.IntBetween(10, 15), }, "status": { - Type: schema.TypeString, - Optional: true, - Default: s3.MetricsStatusEnabled, - ValidateFunc: validation.StringInSlice(s3.MetricsStatus_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: types.MetricsStatusEnabled, + ValidateDiagFunc: enum.Validate[types.MetricsStatus](), }, }, }, @@ -495,18 +492,18 @@ func ResourceBucket() *schema.Resource { ValidateFunc: validation.IntBetween(15, 15), }, "status": { - Type: schema.TypeString, - Optional: true, - Default: s3.ReplicationTimeStatusEnabled, - ValidateFunc: validation.StringInSlice(s3.ReplicationTimeStatus_Values(), false), + Type: schema.TypeString, + Optional: true, + Default: types.ReplicationTimeStatusEnabled, + ValidateDiagFunc: enum.Validate[types.ReplicationTimeStatus](), }, }, }, }, "storage_class": { - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.StringInSlice(s3.StorageClass_Values(), false), + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.StorageClass](), }, }, }, @@ -566,9 +563,9 @@ func ResourceBucket() *schema.Resource { }, }, "status": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.ReplicationRuleStatus_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.ReplicationRuleStatus](), }, }, }, @@ -577,11 +574,11 @@ func ResourceBucket() *schema.Resource { }, }, "request_payer": { - Type: schema.TypeString, - 
Optional: true, - Computed: true, - Deprecated: "Use the aws_s3_bucket_request_payment_configuration resource instead", - ValidateFunc: validation.StringInSlice(s3.Payer_Values(), false), + Type: schema.TypeString, + Optional: true, + Computed: true, + Deprecated: "Use the aws_s3_bucket_request_payment_configuration resource instead", + ValidateDiagFunc: enum.Validate[types.Payer](), }, "server_side_encryption_configuration": { Type: schema.TypeList, @@ -608,9 +605,9 @@ func ResourceBucket() *schema.Resource { Optional: true, }, "sse_algorithm": { - Type: schema.TypeString, - Required: true, - ValidateFunc: validation.StringInSlice(s3.ServerSideEncryption_Values(), false), + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.ServerSideEncryption](), }, }, }, @@ -710,17 +707,16 @@ func ResourceBucket() *schema.Resource { func resourceBucketCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).S3Conn(ctx) - connSDKv2 := meta.(*conns.AWSClient).S3Client(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) bucket := create.Name(d.Get("bucket").(string), d.Get("bucket_prefix").(string)) - awsRegion := meta.(*conns.AWSClient).Region + region := meta.(*conns.AWSClient).Region // Special case: us-east-1 does not return error if the bucket already exists and is owned by // current account. It also resets the Bucket ACLs. 
- if awsRegion == endpoints.UsEast1RegionID { - if err := findBucket(ctx, connSDKv2, bucket); err == nil { - return create.DiagError(names.S3, create.ErrActionCreating, resNameBucket, bucket, errors.New(ErrMessageBucketAlreadyExists)) + if region == names.USEast1RegionID { + if err := findBucket(ctx, conn, bucket); err == nil { + return sdkdiag.AppendErrorf(diags, "creating S3 Bucket (%s): %s", bucket, errors.New(errCodeBucketAlreadyExists)) } } @@ -728,25 +724,25 @@ func resourceBucketCreate(ctx context.Context, d *schema.ResourceData, meta inte Bucket: aws.String(bucket), // NOTE: Please, do not add any other fields here unless the field is // supported in *all* AWS partitions (including ISO partitions) and by - // 3rd party S3 providers. + // third-party S3 API implementations. } if v, ok := d.GetOk("acl"); ok { - input.ACL = aws.String(v.(string)) + input.ACL = types.BucketCannedACL(v.(string)) } else { // Use default value previously available in v3.x of the provider. - input.ACL = aws.String(s3.BucketCannedACLPrivate) + input.ACL = types.BucketCannedACLPrivate } // Special case us-east-1 region and do not set the LocationConstraint. // See "Request Elements: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUT.html - if awsRegion != endpoints.UsEast1RegionID { - input.CreateBucketConfiguration = &s3.CreateBucketConfiguration{ - LocationConstraint: aws.String(awsRegion), + if region != names.USEast1RegionID { + input.CreateBucketConfiguration = &types.CreateBucketConfiguration{ + LocationConstraint: types.BucketLocationConstraint(region), } } - if err := ValidBucketName(bucket, awsRegion); err != nil { + if err := validBucketName(bucket, region); err != nil { return sdkdiag.AppendErrorf(diags, "validating S3 Bucket (%s) name: %s", bucket, err) } @@ -757,26 +753,26 @@ func resourceBucketCreate(ctx context.Context, d *schema.ResourceData, meta inte // S3 Object Lock can only be enabled on bucket creation. 
objectLockConfiguration := expandObjectLockConfiguration(d.Get("object_lock_configuration").([]interface{})) - if objectLockConfiguration != nil && aws.StringValue(objectLockConfiguration.ObjectLockEnabled) == s3.ObjectLockEnabledEnabled { + if objectLockConfiguration != nil && objectLockConfiguration.ObjectLockEnabled == types.ObjectLockEnabledEnabled { input.ObjectLockEnabledForBucket = aws.Bool(true) } _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutCreate), func() (interface{}, error) { - return conn.CreateBucketWithContext(ctx, input) + return conn.CreateBucket(ctx, input) }, errCodeOperationAborted) if err != nil { - return create.DiagError(names.S3, create.ErrActionCreating, resNameBucket, bucket, err) + return sdkdiag.AppendErrorf(diags, "creating S3 Bucket (%s): %s", bucket, err) } d.SetId(bucket) _, err = tfresource.RetryWhenNotFound(ctx, d.Timeout(schema.TimeoutCreate), func() (interface{}, error) { - return nil, findBucket(ctx, connSDKv2, d.Id()) + return nil, findBucket(ctx, conn, d.Id()) }) if err != nil { - return create.DiagError(names.S3, create.ErrActionWaitingForCreation, resNameBucket, bucket, err) + return sdkdiag.AppendErrorf(diags, "waiting for S3 Bucket (%s) create: %s", d.Id(), err) } return append(diags, resourceBucketUpdate(ctx, d, meta)...) 
@@ -784,10 +780,9 @@ func resourceBucketCreate(ctx context.Context, d *schema.ResourceData, meta inte func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).S3Conn(ctx) - connSDKv2 := meta.(*conns.AWSClient).S3Client(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) - err := findBucket(ctx, connSDKv2, d.Id()) + err := findBucket(ctx, conn, d.Id()) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) @@ -796,7 +791,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf } if err != nil { - return create.DiagError(names.S3, create.ErrActionReading, resNameBucket, d.Id(), err) + return sdkdiag.AppendErrorf(diags, "reading S3 Bucket (%s): %s", d.Id(), err) } arn := arn.ARN{ @@ -806,431 +801,353 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf }.String() d.Set("arn", arn) d.Set("bucket", d.Id()) - d.Set("bucket_domain_name", meta.(*conns.AWSClient).PartitionHostname(fmt.Sprintf("%s.s3", d.Get("bucket").(string)))) - d.Set("bucket_prefix", create.NamePrefixFromName(d.Get("bucket").(string))) + d.Set("bucket_domain_name", meta.(*conns.AWSClient).PartitionHostname(d.Id()+".s3")) + d.Set("bucket_prefix", create.NamePrefixFromName(d.Id())) - // Read the policy if configured outside this resource e.g. with aws_s3_bucket_policy resource - pol, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutRead), func() (interface{}, error) { - return conn.GetBucketPolicyWithContext(ctx, &s3.GetBucketPolicyInput{ - Bucket: aws.String(d.Id()), - }) - }, s3.ErrCodeNoSuchBucket) + // + // Bucket Policy. + // + + // Read the policy if configured outside this resource e.g. with aws_s3_bucket_policy resource. 
+ policy, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() (string, error) { + return findBucketPolicy(ctx, conn, d.Id()) + }) // The call to HeadBucket above can occasionally return no error (i.e. NoSuchBucket) // after a bucket has been deleted (eventual consistency woes :/), thus, when making extra S3 API calls // such as GetBucketPolicy, the error should be caught for non-new buckets as follows. - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) d.SetId("") return diags } - if err != nil && !tfawserr.ErrCodeEquals(err, errCodeNoSuchBucketPolicy, errCodeNotImplemented) { - return sdkdiag.AppendErrorf(diags, "getting S3 bucket (%s) policy: %s", d.Id(), err) - } - - if output, ok := pol.(*s3.GetBucketPolicyOutput); ok { - policyToSet, err := verify.PolicyToSet(d.Get("policy").(string), aws.StringValue(output.Policy)) + switch { + case err == nil: + policyToSet, err := verify.PolicyToSet(d.Get("policy").(string), policy) if err != nil { - return sdkdiag.AppendErrorf(diags, "while setting policy (%s), encountered: %s", aws.StringValue(output.Policy), err) + return sdkdiag.AppendFromErr(diags, err) } d.Set("policy", policyToSet) - } else { + case tfawserr.ErrCodeEquals(err, errCodeNoSuchBucketPolicy, errCodeNotImplemented, errCodeXNotImplemented): d.Set("policy", nil) + default: + return diag.Errorf("reading S3 Bucket (%s) policy: %s", d.Id(), err) } - // Read the Grant ACL. - // In the event grants are not configured on the bucket, the API returns an empty array - apResponse, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutRead), func() (interface{}, error) { - return conn.GetBucketAclWithContext(ctx, &s3.GetBucketAclInput{ - Bucket: aws.String(d.Id()), - }) - }, s3.ErrCodeNoSuchBucket) + // + // Bucket ACL. 
+ // - // The S3 API method calls above can occasionally return no error (i.e. NoSuchBucket) - // after a bucket has been deleted (eventual consistency woes :/), thus, when making extra S3 API calls - // such as GetBucketAcl, the error should be caught for non-new buckets as follows. - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + bucketACL, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() (*s3.GetBucketAclOutput, error) { + return findBucketACL(ctx, conn, d.Id(), "") + }) + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) d.SetId("") return diags } - if err != nil { - return sdkdiag.AppendErrorf(diags, "getting S3 Bucket (%s) ACL: %s", d.Id(), err) - } - - if aclOutput, ok := apResponse.(*s3.GetBucketAclOutput); ok { - if err := d.Set("grant", flattenGrants(aclOutput)); err != nil { - return sdkdiag.AppendErrorf(diags, "setting grant %s", err) + switch { + case err == nil: + if err := d.Set("grant", flattenGrants(bucketACL)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting grant: %s", err) } - } else { + case tfawserr.ErrCodeEquals(err, errCodeNotImplemented, errCodeXNotImplemented): d.Set("grant", nil) + default: + return diag.Errorf("reading S3 Bucket (%s) ACL: %s", d.Id(), err) } - // Read the CORS - corsResponse, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutRead), func() (interface{}, error) { - return conn.GetBucketCorsWithContext(ctx, &s3.GetBucketCorsInput{ - Bucket: aws.String(d.Id()), - }) - }, s3.ErrCodeNoSuchBucket) + // + // Bucket CORS Configuration. + // - // The S3 API method calls above can occasionally return no error (i.e. NoSuchBucket) - // after a bucket has been deleted (eventual consistency woes :/), thus, when making extra S3 API calls - // such as GetBucketCors, the error should be caught for non-new buckets as follows. 
- if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + corsRules, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() ([]types.CORSRule, error) { + return findCORSRules(ctx, conn, d.Id(), "") + }) + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) d.SetId("") return diags } - if err != nil && !tfawserr.ErrCodeEquals(err, errCodeNoSuchCORSConfiguration, errCodeNotImplemented, errCodeXNotImplemented) { - return sdkdiag.AppendErrorf(diags, "getting S3 Bucket CORS configuration: %s", err) - } - - if output, ok := corsResponse.(*s3.GetBucketCorsOutput); ok { - if err := d.Set("cors_rule", flattenBucketCorsRules(output.CORSRules)); err != nil { + switch { + case err == nil: + if err := d.Set("cors_rule", flattenBucketCorsRules(corsRules)); err != nil { return sdkdiag.AppendErrorf(diags, "setting cors_rule: %s", err) } - } else { + case tfawserr.ErrCodeEquals(err, errCodeNoSuchCORSConfiguration, errCodeNotImplemented, errCodeXNotImplemented): d.Set("cors_rule", nil) + default: + return diag.Errorf("reading S3 Bucket (%s) CORS configuration: %s", d.Id(), err) } - // Read the website configuration - wsResponse, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutRead), func() (interface{}, error) { - return conn.GetBucketWebsiteWithContext(ctx, &s3.GetBucketWebsiteInput{ - Bucket: aws.String(d.Id()), - }) - }, s3.ErrCodeNoSuchBucket) + // + // Bucket Website Configuration. + // - // The S3 API method calls above can occasionally return no error (i.e. NoSuchBucket) - // after a bucket has been deleted (eventual consistency woes :/), thus, when making extra S3 API calls - // such as GetBucketWebsite, the error should be caught for non-new buckets as follows. 
- if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + bucketWebsite, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() (*s3.GetBucketWebsiteOutput, error) { + return findBucketWebsite(ctx, conn, d.Id(), "") + }) + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) d.SetId("") return diags } - if err != nil && !tfawserr.ErrCodeEquals(err, - errCodeMethodNotAllowed, - errCodeNotImplemented, - errCodeNoSuchWebsiteConfiguration, - errCodeXNotImplemented, - ) { - return sdkdiag.AppendErrorf(diags, "getting S3 Bucket website configuration: %s", err) - } - - if ws, ok := wsResponse.(*s3.GetBucketWebsiteOutput); ok { - website, err := flattenBucketWebsite(ws) + switch { + case err == nil: + website, err := flattenBucketWebsite(bucketWebsite) if err != nil { - return sdkdiag.AppendErrorf(diags, "setting website: %s", err) + return sdkdiag.AppendFromErr(diags, err) } if err := d.Set("website", website); err != nil { return sdkdiag.AppendErrorf(diags, "setting website: %s", err) } - } else { + case tfawserr.ErrCodeEquals(err, errCodeNoSuchWebsiteConfiguration, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): d.Set("website", nil) + default: + return diag.Errorf("reading S3 Bucket (%s) website configuration: %s", d.Id(), err) } - // Read the versioning configuration + // + // Bucket Versioning. 
+ // - versioningResponse, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutRead), func() (interface{}, error) { - return conn.GetBucketVersioningWithContext(ctx, &s3.GetBucketVersioningInput{ - Bucket: aws.String(d.Id()), - }) - }, s3.ErrCodeNoSuchBucket) + bucketVersioning, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() (*s3.GetBucketVersioningOutput, error) { + return findBucketVersioning(ctx, conn, d.Id(), "") + }) - // The S3 API method calls above can occasionally return no error (i.e. NoSuchBucket) - // after a bucket has been deleted (eventual consistency woes :/), thus, when making extra S3 API calls - // such as GetBucketVersioning, the error should be caught for non-new buckets as follows. - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) d.SetId("") return diags } - if err != nil { - return sdkdiag.AppendErrorf(diags, "getting S3 Bucket versioning (%s): %s", d.Id(), err) - } - - if versioning, ok := versioningResponse.(*s3.GetBucketVersioningOutput); ok { - if err := d.Set("versioning", flattenVersioning(versioning)); err != nil { + switch { + case err == nil: + if err := d.Set("versioning", flattenVersioning(bucketVersioning)); err != nil { return sdkdiag.AppendErrorf(diags, "setting versioning: %s", err) } + case tfawserr.ErrCodeEquals(err, errCodeNotImplemented, errCodeXNotImplemented): + d.Set("versioning", nil) + default: + return diag.Errorf("reading S3 Bucket (%s) versioning: %s", d.Id(), err) } - // Read the acceleration status + // + // Bucket Accelerate Configuration. 
+ // - accelerateResponse, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutRead), func() (interface{}, error) { - return conn.GetBucketAccelerateConfigurationWithContext(ctx, &s3.GetBucketAccelerateConfigurationInput{ - Bucket: aws.String(d.Id()), - }) - }, s3.ErrCodeNoSuchBucket) + bucketAccelerate, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() (*s3.GetBucketAccelerateConfigurationOutput, error) { + return findBucketAccelerateConfiguration(ctx, conn, d.Id(), "") + }) - // The S3 API method calls above can occasionally return no error (i.e. NoSuchBucket) - // after a bucket has been deleted (eventual consistency woes :/), thus, when making extra S3 API calls - // such as GetBucketAccelerateConfiguration, the error should be caught for non-new buckets as follows. - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) d.SetId("") return diags } - // Amazon S3 Transfer Acceleration might not be supported in the region - if err != nil && !tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeUnsupportedArgument, errCodeNotImplemented, errCodeXNotImplemented) { - return sdkdiag.AppendErrorf(diags, "getting S3 Bucket (%s) accelerate configuration: %s", d.Id(), err) + switch { + case err == nil: + d.Set("acceleration_status", bucketAccelerate.Status) + case tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeUnsupportedArgument, errCodeNotImplemented, errCodeXNotImplemented): + d.Set("acceleration_status", nil) + default: + return diag.Errorf("reading S3 Bucket (%s) accelerate configuration: %s", d.Id(), err) } - if accelerate, ok := accelerateResponse.(*s3.GetBucketAccelerateConfigurationOutput); ok { - d.Set("acceleration_status", accelerate.Status) - } + // + // Bucket Request Payment Configuration. 
+ // - // Read the request payer configuration. - - payerResponse, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutRead), func() (interface{}, error) { - return conn.GetBucketRequestPaymentWithContext(ctx, &s3.GetBucketRequestPaymentInput{ - Bucket: aws.String(d.Id()), - }) - }, s3.ErrCodeNoSuchBucket) + bucketRequestPayment, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() (*s3.GetBucketRequestPaymentOutput, error) { + return findBucketRequestPayment(ctx, conn, d.Id(), "") + }) - // The S3 API method calls above can occasionally return no error (i.e. NoSuchBucket) - // after a bucket has been deleted (eventual consistency woes :/), thus, when making extra S3 API calls - // such as GetBucketRequestPayment, the error should be caught for non-new buckets as follows. - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) d.SetId("") return diags } - if err != nil && !tfawserr.ErrCodeEquals(err, errCodeNotImplemented, errCodeXNotImplemented) { - return sdkdiag.AppendErrorf(diags, "getting S3 Bucket request payment: %s", err) + switch { + case err == nil: + d.Set("request_payer", bucketRequestPayment.Payer) + case tfawserr.ErrCodeEquals(err, errCodeNotImplemented, errCodeXNotImplemented): + d.Set("request_payer", nil) + default: + return diag.Errorf("reading S3 Bucket (%s) request payment configuration: %s", d.Id(), err) } - if payer, ok := payerResponse.(*s3.GetBucketRequestPaymentOutput); ok { - d.Set("request_payer", payer.Payer) - } + // + // Bucket Logging. 
+ // - // Read the logging configuration if configured outside this resource - loggingResponse, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutRead), func() (interface{}, error) { - return conn.GetBucketLoggingWithContext(ctx, &s3.GetBucketLoggingInput{ - Bucket: aws.String(d.Id()), - }) - }, s3.ErrCodeNoSuchBucket) + loggingEnabled, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() (*types.LoggingEnabled, error) { + return findLoggingEnabled(ctx, conn, d.Id(), "") + }) - // The S3 API method calls above can occasionally return no error (i.e. NoSuchBucket) - // after a bucket has been deleted (eventual consistency woes :/), thus, when making extra S3 API calls - // such as GetBucketLogging, the error should be caught for non-new buckets as follows. - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) d.SetId("") return diags } - if err != nil && !tfawserr.ErrCodeEquals(err, errCodeNotImplemented, errCodeXNotImplemented) { - return sdkdiag.AppendErrorf(diags, "getting S3 Bucket logging: %s", err) - } - - if logging, ok := loggingResponse.(*s3.GetBucketLoggingOutput); ok { - if err := d.Set("logging", flattenBucketLoggingEnabled(logging.LoggingEnabled)); err != nil { + switch { + case err == nil: + if err := d.Set("logging", flattenBucketLoggingEnabled(loggingEnabled)); err != nil { return sdkdiag.AppendErrorf(diags, "setting logging: %s", err) } - } else { + case tfawserr.ErrCodeEquals(err, errCodeNotImplemented, errCodeXNotImplemented): d.Set("logging", nil) + default: + return diag.Errorf("reading S3 Bucket (%s) logging: %s", d.Id(), err) } - // Read the lifecycle configuration + // + // Bucket Lifecycle Configuration. 
+ // - lifecycleResponse, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutRead), func() (interface{}, error) { - return conn.GetBucketLifecycleConfigurationWithContext(ctx, &s3.GetBucketLifecycleConfigurationInput{ - Bucket: aws.String(d.Id()), - }) - }, s3.ErrCodeNoSuchBucket) + lifecycleRules, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() ([]types.LifecycleRule, error) { + return findLifecycleRules(ctx, conn, d.Id(), "") + }) - // The S3 API method calls above can occasionally return no error (i.e. NoSuchBucket) - // after a bucket has been deleted (eventual consistency woes :/), thus, when making extra S3 API calls - // such as GetBucketLifecycleConfiguration, the error should be caught for non-new buckets as follows. - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) d.SetId("") return diags } - if err != nil && !tfawserr.ErrCodeEquals(err, errCodeNoSuchLifecycleConfiguration, errCodeNotImplemented, errCodeXNotImplemented) { - return sdkdiag.AppendErrorf(diags, "getting S3 Bucket (%s) Lifecycle Configuration: %s", d.Id(), err) - } - - if lifecycle, ok := lifecycleResponse.(*s3.GetBucketLifecycleConfigurationOutput); ok { - if err := d.Set("lifecycle_rule", flattenBucketLifecycleRules(ctx, lifecycle.Rules)); err != nil { + switch { + case err == nil: + if err := d.Set("lifecycle_rule", flattenBucketLifecycleRules(ctx, lifecycleRules)); err != nil { return sdkdiag.AppendErrorf(diags, "setting lifecycle_rule: %s", err) } - } else { + case tfawserr.ErrCodeEquals(err, errCodeNoSuchLifecycleConfiguration, errCodeNotImplemented, errCodeXNotImplemented): d.Set("lifecycle_rule", nil) + default: + return diag.Errorf("reading S3 Bucket (%s) lifecycle configuration: %s", d.Id(), err) } - // Read the bucket replication configuration if 
configured outside this resource + // + // Bucket Replication Configuration. + // - replicationResponse, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutRead), func() (interface{}, error) { - return conn.GetBucketReplicationWithContext(ctx, &s3.GetBucketReplicationInput{ - Bucket: aws.String(d.Id()), - }) - }, s3.ErrCodeNoSuchBucket) + replicationConfiguration, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() (*types.ReplicationConfiguration, error) { + return findReplicationConfiguration(ctx, conn, d.Id()) + }) - // The S3 API method calls above can occasionally return no error (i.e. NoSuchBucket) - // after a bucket has been deleted (eventual consistency woes :/), thus, when making extra S3 API calls - // such as GetBucketReplication, the error should be caught for non-new buckets as follows. - if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) d.SetId("") return diags } - if err != nil && !tfawserr.ErrCodeEquals(err, errCodeReplicationConfigurationNotFound, errCodeNotImplemented, errCodeXNotImplemented) { - return sdkdiag.AppendErrorf(diags, "getting S3 Bucket replication: %s", err) - } - - if replication, ok := replicationResponse.(*s3.GetBucketReplicationOutput); ok { - if err := d.Set("replication_configuration", flattenBucketReplicationConfiguration(ctx, replication.ReplicationConfiguration)); err != nil { + switch { + case err == nil: + if err := d.Set("replication_configuration", flattenBucketReplicationConfiguration(ctx, replicationConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting replication_configuration: %s", err) } - } else { - // Still need to set for the non-existent case + case tfawserr.ErrCodeEquals(err, errCodeReplicationConfigurationNotFound, errCodeNotImplemented, errCodeXNotImplemented): 
d.Set("replication_configuration", nil) + default: + return diag.Errorf("reading S3 Bucket (%s) replication configuration: %s", d.Id(), err) } - // Read the bucket server side encryption configuration + // + // Bucket Server-side Encryption Configuration. + // - encryptionResponse, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutRead), func() (interface{}, error) { - return conn.GetBucketEncryptionWithContext(ctx, &s3.GetBucketEncryptionInput{ - Bucket: aws.String(d.Id()), - }) - }, s3.ErrCodeNoSuchBucket) + encryptionConfiguration, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() (*types.ServerSideEncryptionConfiguration, error) { + return findServerSideEncryptionConfiguration(ctx, conn, d.Id(), "") + }) - // The S3 API method calls above can occasionally return no error (i.e. NoSuchBucket) - // after a bucket has been deleted (eventual consistency woes :/), thus, when making extra S3 API calls - // such as GetBucketEncryption, the error should be caught for non-new buckets as follows. 
- if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) d.SetId("") return diags } - if err != nil && !tfawserr.ErrCodeEquals(err, errCodeServerSideEncryptionConfigurationNotFound) { - return sdkdiag.AppendErrorf(diags, "getting S3 Bucket encryption: %s", err) - } - - if encryption, ok := encryptionResponse.(*s3.GetBucketEncryptionOutput); ok { - if err := d.Set("server_side_encryption_configuration", flattenServerSideEncryptionConfiguration(encryption.ServerSideEncryptionConfiguration)); err != nil { + switch { + case err == nil: + if err := d.Set("server_side_encryption_configuration", flattenServerSideEncryptionConfiguration(encryptionConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting server_side_encryption_configuration: %s", err) } - } else { + case tfawserr.ErrCodeEquals(err, errCodeReplicationConfigurationNotFound, errCodeNotImplemented, errCodeXNotImplemented): d.Set("server_side_encryption_configuration", nil) + default: + return diag.Errorf("reading S3 Bucket (%s) server-side encryption configuration: %s", d.Id(), err) } - // Object Lock configuration. - resp, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutRead), func() (interface{}, error) { - return conn.GetObjectLockConfigurationWithContext(ctx, &s3.GetObjectLockConfigurationInput{ - Bucket: aws.String(d.Id()), - }) - }, s3.ErrCodeNoSuchBucket) + // + // Bucket Object Lock Configuration. + // - // The S3 API method calls above can occasionally return no error (i.e. NoSuchBucket) - // after a bucket has been deleted (eventual consistency woes :/), thus, when making extra S3 API calls - // such as GetObjectLockConfiguration, the error should be caught for non-new buckets as follows. 
- if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + objLockConfig, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() (*types.ObjectLockConfiguration, error) { + return findObjectLockConfiguration(ctx, conn, d.Id(), "") + }) + + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) d.SetId("") return diags } - // Object lock not supported in all partitions (extra guard, also guards in read func) - if err != nil && !tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented) && !tfawserr.ErrCodeContains(err, errCodeObjectLockConfigurationNotFound) { - if meta.(*conns.AWSClient).Partition == endpoints.AwsPartitionID || meta.(*conns.AWSClient).Partition == endpoints.AwsUsGovPartitionID { - return sdkdiag.AppendErrorf(diags, "getting S3 Bucket (%s) Object Lock configuration: %s", d.Id(), err) - } - } - - if err != nil { - log.Printf("[WARN] Unable to read S3 bucket (%s) Object Lock Configuration: %s", d.Id(), err) - } - - if output, ok := resp.(*s3.GetObjectLockConfigurationOutput); ok && output.ObjectLockConfiguration != nil { - d.Set("object_lock_enabled", aws.StringValue(output.ObjectLockConfiguration.ObjectLockEnabled) == s3.ObjectLockEnabledEnabled) - if err := d.Set("object_lock_configuration", flattenObjectLockConfiguration(output.ObjectLockConfiguration)); err != nil { + switch { + case err == nil: + if err := d.Set("object_lock_configuration", flattenObjectLockConfiguration(objLockConfig)); err != nil { return sdkdiag.AppendErrorf(diags, "setting object_lock_configuration: %s", err) } - } else { + d.Set("object_lock_enabled", objLockConfig.ObjectLockEnabled == types.ObjectLockEnabledEnabled) + case tfawserr.ErrCodeEquals(err, errCodeObjectLockConfigurationNotFoundError, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): + d.Set("object_lock_configuration", nil) 
d.Set("object_lock_enabled", nil) + default: + if partition := meta.(*conns.AWSClient).Partition; partition == names.StandardPartitionID || partition == names.USGovCloudPartitionID { + return diag.Errorf("reading S3 Bucket (%s) object lock configuration: %s", d.Id(), err) + } + log.Printf("[WARN] Unable to read S3 Bucket (%s) Object Lock Configuration: %s", d.Id(), err) d.Set("object_lock_configuration", nil) + d.Set("object_lock_enabled", nil) } - // Add the region as an attribute - discoveredRegion, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutRead), func() (interface{}, error) { - return s3manager.GetBucketRegionWithClient(ctx, conn, d.Id(), func(r *request.Request) { - // By default, GetBucketRegion forces virtual host addressing, which - // is not compatible with many non-AWS implementations. Instead, pass - // the provider s3_force_path_style configuration, which defaults to - // false, but allows override. - r.Config.S3ForcePathStyle = conn.Config.S3ForcePathStyle - - // By default, GetBucketRegion uses anonymous credentials when doing - // a HEAD request to get the bucket region. This breaks in aws-cn regions - // when the account doesn't have an ICP license to host public content. - // Use the current credentials when getting the bucket region. - r.Config.Credentials = conn.Config.Credentials - }) - }, "NotFound") + region, err := manager.GetBucketRegion(ctx, conn, d.Id(), func(o *s3.Options) { + o.UsePathStyle = meta.(*conns.AWSClient).S3UsePathStyle() + }) - // The S3 API method calls above can occasionally return no error (i.e. NoSuchBucket) - // after a bucket has been deleted (eventual consistency woes :/), thus, when making extra S3 API calls - // such as s3manager.GetBucketRegionWithClient, the error should be caught for non-new buckets as follows. 
- if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) d.SetId("") return diags } if err != nil { - return sdkdiag.AppendErrorf(diags, "getting S3 Bucket location: %s", err) + return sdkdiag.AppendErrorf(diags, "reading S3 Bucket (%s) location: %s", d.Id(), err) } - region := discoveredRegion.(string) d.Set("region", region) - // Add the bucket_regional_domain_name as an attribute - regionalEndpoint, err := BucketRegionalDomainName(d.Get("bucket").(string), region) - if err != nil { - return sdkdiag.AppendErrorf(diags, "getting S3 Bucket regional domain name: %s", err) - } - d.Set("bucket_regional_domain_name", regionalEndpoint) + d.Set("bucket_regional_domain_name", bucketRegionalDomainName(d.Id(), region)) // Add the hosted zone ID for this bucket's region as an attribute - hostedZoneID, err := HostedZoneIDForRegion(region) + hostedZoneID, err := hostedZoneIDForRegion(region) if err != nil { log.Printf("[WARN] %s", err) } else { d.Set("hosted_zone_id", hostedZoneID) } - // Add website_endpoint as an attribute - websiteEndpoint, err := websiteEndpoint(ctx, meta.(*conns.AWSClient), d) - - // The S3 API method calls above can occasionally return no error (i.e. NoSuchBucket) - // after a bucket has been deleted (eventual consistency woes :/), thus, when making extra S3 API calls - // such as GetBucketLocation, the error should be caught for non-new buckets as follows. 
- if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { - log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) - d.SetId("") - return diags - } - if err != nil { - return sdkdiag.AppendErrorf(diags, "reading S3 Bucket (%s): %s", d.Id(), err) - } - - if websiteEndpoint != nil { - d.Set("website_endpoint", websiteEndpoint.Endpoint) - d.Set("website_domain", websiteEndpoint.Domain) + if _, ok := d.GetOk("website"); ok { + endpoint, domain := bucketWebsiteEndpointAndDomain(d.Id(), region) + d.Set("website_domain", domain) + d.Set("website_endpoint", endpoint) } // Retry due to S3 eventual consistency @@ -1434,16 +1351,16 @@ func resourceBucketDelete(ctx context.Context, d *schema.ResourceData, meta inte return nil } -func findBucket(ctx context.Context, conn *s3_sdkv2.Client, bucket string, optFns ...func(*s3_sdkv2.Options)) error { - input := &s3_sdkv2.HeadBucketInput{ - Bucket: aws_sdkv2.String(bucket), +func findBucket(ctx context.Context, conn *s3.Client, bucket string, optFns ...func(*s3.Options)) error { + input := &s3.HeadBucketInput{ + Bucket: aws.String(bucket), } _, err := conn.HeadBucket(ctx, input, optFns...) // For directory buckets that no longer exist it's the CreateSession call invoked by HeadBucket that returns "NoSuchBucket", // and that error code is flattend into HeadBucket's error message -- hence the 'errs.Contains' call. 
- if tfawserr_sdkv2.ErrHTTPStatusCodeEquals(err, http.StatusNotFound) || tfawserr_sdkv2.ErrCodeEquals(err, errCodeNoSuchBucket) || errs.Contains(err, errCodeNoSuchBucket) { + if tfawserr.ErrHTTPStatusCodeEquals(err, http.StatusNotFound) || tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) || errs.Contains(err, errCodeNoSuchBucket) { return &retry.NotFoundError{ LastError: err, LastRequest: input, @@ -1453,102 +1370,59 @@ func findBucket(ctx context.Context, conn *s3_sdkv2.Client, bucket string, optFn return err } -// https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region -func BucketRegionalDomainName(bucket string, region string) (string, error) { - // Return a default AWS Commercial domain name if no region is provided - // Otherwise EndpointFor() will return BUCKET.s3..amazonaws.com - if region == "" { - return fmt.Sprintf("%s.s3.amazonaws.com", bucket), nil //lintignore:AWSR001 - } - endpoint, err := endpoints.DefaultResolver().EndpointFor(s3.EndpointsID, region, func(o *endpoints.Options) { - // By default, EndpointFor uses the legacy endpoint for S3 in the us-east-1 region - o.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint - }) +func retryWhenNoSuchBucketError[T any](ctx context.Context, timeout time.Duration, f func() (T, error)) (T, error) { + outputRaw, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func() (interface{}, error) { + return f() + }, errCodeNoSuchBucket) + if err != nil { - return "", err + var zero T + return zero, err } - return fmt.Sprintf("%s.%s", bucket, strings.TrimPrefix(endpoint.URL, "https://")), nil -} - -type S3Website struct { - Endpoint, Domain string -} -func WebsiteEndpoint(client *conns.AWSClient, bucket string, region string) *S3Website { - domain := WebsiteDomainURL(client, region) - return &S3Website{Endpoint: fmt.Sprintf("%s.%s", bucket, domain), Domain: domain} + return outputRaw.(T), nil } -func WebsiteDomainURL(client *conns.AWSClient, region string) string { - region = 
normalizeRegion(region) - - // Different regions have different syntax for website endpoints - // https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html - // https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints - if isOldRegion(region) { - return fmt.Sprintf("s3-website-%s.amazonaws.com", region) //lintignore:AWSR001 +// https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region +func bucketRegionalDomainName(bucket, region string) string { + // Return a default AWS Commercial domain name if no Region is provided. + if region == "" { + return fmt.Sprintf("%s.s3.amazonaws.com", bucket) //lintignore:AWSR001 } - return client.RegionalHostname("s3-website") + return fmt.Sprintf("%s.s3.%s.%s", bucket, region, names.DNSSuffixForPartition(names.PartitionForRegion(region))) } -func websiteEndpoint(ctx context.Context, client *conns.AWSClient, d *schema.ResourceData) (*S3Website, error) { - // If the bucket doesn't have a website configuration, return an empty - // endpoint - if _, ok := d.GetOk("website"); !ok { - return nil, nil - } - - bucket := d.Get("bucket").(string) - - // Lookup the region for this bucket - - locationResponse, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutRead), func() (interface{}, error) { - return client.S3Conn(ctx).GetBucketLocation( - &s3.GetBucketLocationInput{ - Bucket: aws.String(bucket), - }, - ) - }, s3.ErrCodeNoSuchBucket) - if err != nil { - return nil, err - } - location := locationResponse.(*s3.GetBucketLocationOutput) - var region string - if location.LocationConstraint != nil { - region = aws.StringValue(location.LocationConstraint) - } - - return WebsiteEndpoint(client, bucket, region), nil -} +func bucketWebsiteEndpointAndDomain(bucket, region string) (string, string) { + var domain string -func isOldRegion(region string) bool { - oldRegions := []string{ - endpoints.ApNortheast1RegionID, - endpoints.ApSoutheast1RegionID, - 
endpoints.ApSoutheast2RegionID, - endpoints.EuWest1RegionID, - endpoints.SaEast1RegionID, - endpoints.UsEast1RegionID, - endpoints.UsGovWest1RegionID, - endpoints.UsWest1RegionID, - endpoints.UsWest2RegionID, - } - for _, r := range oldRegions { - if region == r { - return true - } - } - return false -} - -func normalizeRegion(region string) string { // Default to us-east-1 if the bucket doesn't have a region: // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html if region == "" { - region = endpoints.UsEast1RegionID + region = names.USEast1RegionID + } + + // Different regions have different syntax for website endpoints: + // https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html + // https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints + oldRegions := []string{ + names.APNortheast1RegionID, + names.APSoutheast1RegionID, + names.APSoutheast2RegionID, + names.EUWest1RegionID, + names.SAEast1RegionID, + names.USEast1RegionID, + names.USGovWest1RegionID, + names.USWest1RegionID, + names.USWest2RegionID, + } + if slices.Contains(oldRegions, region) { + domain = fmt.Sprintf("s3-website-%s.amazonaws.com", region) //lintignore:AWSR001 + } else { + dnsSuffix := names.DNSSuffixForPartition(names.PartitionForRegion(region)) + domain = fmt.Sprintf("s3-website.%s.%s", region, dnsSuffix) } - return region + return fmt.Sprintf("%s.%s", bucket, domain), domain } ////////////////////////////////////////// Argument-Specific Update Functions ////////////////////////////////////////// @@ -2434,17 +2308,17 @@ func flattenBucketLoggingEnabled(loggingEnabled *s3.LoggingEnabled) []interface{ // Object Lock Configuration functions -func expandObjectLockConfiguration(vConf []interface{}) *s3.ObjectLockConfiguration { +func expandObjectLockConfiguration(vConf []interface{}) *types.ObjectLockConfiguration { if len(vConf) == 0 || vConf[0] == nil { return nil } mConf := vConf[0].(map[string]interface{}) - conf := 
&s3.ObjectLockConfiguration{} + conf := &types.ObjectLockConfiguration{} if vObjectLockEnabled, ok := mConf["object_lock_enabled"].(string); ok && vObjectLockEnabled != "" { - conf.ObjectLockEnabled = aws.String(vObjectLockEnabled) + conf.ObjectLockEnabled = types.ObjectLockEnabled(vObjectLockEnabled) } if vRule, ok := mConf["rule"].([]interface{}); ok && len(vRule) > 0 { @@ -2453,18 +2327,18 @@ func expandObjectLockConfiguration(vConf []interface{}) *s3.ObjectLockConfigurat if vDefaultRetention, ok := mRule["default_retention"].([]interface{}); ok && len(vDefaultRetention) > 0 && vDefaultRetention[0] != nil { mDefaultRetention := vDefaultRetention[0].(map[string]interface{}) - conf.Rule = &s3.ObjectLockRule{ - DefaultRetention: &s3.DefaultRetention{}, + conf.Rule = &types.ObjectLockRule{ + DefaultRetention: &types.DefaultRetention{}, } if vMode, ok := mDefaultRetention["mode"].(string); ok && vMode != "" { - conf.Rule.DefaultRetention.Mode = aws.String(vMode) + conf.Rule.DefaultRetention.Mode = types.ObjectLockRetentionMode(vMode) } if vDays, ok := mDefaultRetention["days"].(int); ok && vDays > 0 { - conf.Rule.DefaultRetention.Days = aws.Int64(int64(vDays)) + conf.Rule.DefaultRetention.Days = aws.Int32(int32(vDays)) } if vYears, ok := mDefaultRetention["years"].(int); ok && vYears > 0 { - conf.Rule.DefaultRetention.Years = aws.Int64(int64(vYears)) + conf.Rule.DefaultRetention.Years = aws.Int32(int32(vYears)) } } } @@ -3140,3 +3014,48 @@ func removeNil(data map[string]interface{}) map[string]interface{} { return withoutNil } + +// validBucketName validates any S3 bucket name that is not inside the us-east-1 region. +// Buckets outside of this region have to be DNS-compliant. 
After the same restrictions are +// applied to buckets in the us-east-1 region, this function can be refactored as a SchemaValidateFunc +func validBucketName(value string, region string) error { + if region != names.USEast1RegionID { + if (len(value) < 3) || (len(value) > 63) { + return fmt.Errorf("%q must contain from 3 to 63 characters", value) + } + if !regexache.MustCompile(`^[0-9a-z-.]+$`).MatchString(value) { + return fmt.Errorf("only lowercase alphanumeric characters and hyphens allowed in %q", value) + } + if regexache.MustCompile(`^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$`).MatchString(value) { + return fmt.Errorf("%q must not be formatted as an IP address", value) + } + if strings.HasPrefix(value, `.`) { + return fmt.Errorf("%q cannot start with a period", value) + } + if strings.HasSuffix(value, `.`) { + return fmt.Errorf("%q cannot end with a period", value) + } + if strings.Contains(value, `..`) { + return fmt.Errorf("%q can be only one period between labels", value) + } + } else { + if len(value) > 255 { + return fmt.Errorf("%q must contain less than 256 characters", value) + } + if !regexache.MustCompile(`^[0-9A-Za-z_.-]+$`).MatchString(value) { + return fmt.Errorf("only alphanumeric characters, hyphens, periods, and underscores allowed in %q", value) + } + } + return nil +} + +func validBucketLifecycleTimestamp(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + _, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", value)) + if err != nil { + errors = append(errors, fmt.Errorf( + "%q cannot be parsed as RFC3339 Timestamp Format", value)) + } + + return +} diff --git a/internal/service/s3/bucket_data_source.go b/internal/service/s3/bucket_data_source.go index f61cd423eb9..20c70f4e464 100644 --- a/internal/service/s3/bucket_data_source.go +++ b/internal/service/s3/bucket_data_source.go @@ -100,11 +100,7 @@ func dataSourceBucketRead(ctx context.Context, d *schema.ResourceData, meta inte }.String() d.Set("arn", arn) 
d.Set("bucket_domain_name", awsClient.PartitionHostname(fmt.Sprintf("%s.s3", bucket))) - if regionalDomainName, err := BucketRegionalDomainName(bucket, region); err == nil { - d.Set("bucket_regional_domain_name", regionalDomainName) - } else { - log.Printf("[WARN] BucketRegionalDomainName: %s", err) - } + d.Set("bucket_regional_domain_name", bucketRegionalDomainName(bucket, region)) if hostedZoneID, err := hostedZoneIDForRegion(region); err == nil { d.Set("hosted_zone_id", hostedZoneID) } else { @@ -112,9 +108,9 @@ func dataSourceBucketRead(ctx context.Context, d *schema.ResourceData, meta inte } d.Set("region", region) if _, err := findBucketWebsite(ctx, conn, bucket, ""); err == nil { - website := WebsiteEndpoint(awsClient, bucket, region) - d.Set("website_domain", website.Domain) - d.Set("website_endpoint", website.Endpoint) + endpoint, domain := bucketWebsiteEndpointAndDomain(bucket, region) + d.Set("website_domain", domain) + d.Set("website_endpoint", endpoint) } else if !tfresource.NotFound(err) { log.Printf("[WARN] Reading S3 Bucket (%s) Website: %s", bucket, err) } diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index 03d27eb9511..6dff0e640ce 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -2397,13 +2397,7 @@ func TestBucketRegionalDomainName(t *testing.T) { } for _, tc := range testCases { - output, err := tfs3.BucketRegionalDomainName(bucket, tc.Region) - if tc.ExpectedErrCount == 0 && err != nil { - t.Fatalf("expected %q not to trigger an error, received: %s", tc.Region, err) - } - if tc.ExpectedErrCount > 0 && err == nil { - t.Fatalf("expected %q to trigger an error", tc.Region) - } + output := tfs3.BucketRegionalDomainName(bucket, tc.Region) if output != tc.ExpectedOutput { t.Fatalf("expected %q, received %q", tc.ExpectedOutput, output) } @@ -2420,59 +2414,35 @@ func TestWebsiteEndpoint(t *testing.T) { Expected string }{ { - TestingClient: &conns.AWSClient{ - DNSSuffix: 
"amazonaws.com", - Region: names.USEast1RegionID, - }, LocationConstraint: "", Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", names.USEast1RegionID, acctest.PartitionDNSSuffix()), }, { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: names.USEast2RegionID, - }, LocationConstraint: names.USEast2RegionID, Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", names.USEast2RegionID, acctest.PartitionDNSSuffix()), }, { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com", - Region: names.USGovEast1RegionID, - }, LocationConstraint: names.USGovEast1RegionID, Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", names.USGovEast1RegionID, acctest.PartitionDNSSuffix()), }, { - TestingClient: &conns.AWSClient{ - DNSSuffix: "c2s.ic.gov", - Region: "us-iso-east-1", - }, LocationConstraint: "us-iso-east-1", Expected: fmt.Sprintf("bucket-name.s3-website.%s.c2s.ic.gov", "us-iso-east-1"), }, { - TestingClient: &conns.AWSClient{ - DNSSuffix: "sc2s.sgov.gov", - Region: "us-isob-east-1", - }, LocationConstraint: "us-isob-east-1", Expected: fmt.Sprintf("bucket-name.s3-website.%s.sc2s.sgov.gov", "us-isob-east-1"), }, { - TestingClient: &conns.AWSClient{ - DNSSuffix: "amazonaws.com.cn", - Region: names.CNNorth1RegionID, - }, LocationConstraint: names.CNNorth1RegionID, Expected: fmt.Sprintf("bucket-name.s3-website.%s.amazonaws.com.cn", names.CNNorth1RegionID), }, } for _, testCase := range testCases { - got := tfs3.WebsiteEndpoint(testCase.TestingClient, "bucket-name", testCase.LocationConstraint) - if got.Endpoint != testCase.Expected { - t.Errorf("WebsiteEndpointUrl(\"bucket-name\", %q) => %q, want %q", testCase.LocationConstraint, got.Endpoint, testCase.Expected) + got, _ := tfs3.BucketWebsiteEndpointAndDomain("bucket-name", testCase.LocationConstraint) + if got != testCase.Expected { + t.Errorf("BucketWebsiteEndpointAndDomain(\"bucket-name\", %q) => %q, want %q", testCase.LocationConstraint, got, testCase.Expected) } } } @@ -2693,17 
+2663,12 @@ func testAccCheckBucketDomainName(resourceName string, attributeName string, buc } func testAccBucketRegionalDomainName(bucket, region string) string { - regionalEndpoint, err := tfs3.BucketRegionalDomainName(bucket, region) - if err != nil { - return fmt.Sprintf("regional endpoint not found for S3 Bucket (%s)", bucket) - } - return regionalEndpoint + return tfs3.BucketRegionalDomainName(bucket, region) } func testAccCheckBucketWebsiteEndpoint(resourceName string, attributeName string, bucketName string, region string) resource.TestCheckFunc { return func(s *terraform.State) error { - website := tfs3.WebsiteEndpoint(acctest.Provider.Meta().(*conns.AWSClient), bucketName, region) - expectedValue := website.Endpoint + expectedValue, _ := tfs3.BucketWebsiteEndpointAndDomain(bucketName, region) return resource.TestCheckResourceAttr(resourceName, attributeName, expectedValue)(s) } diff --git a/internal/service/s3/bucket_website_configuration.go b/internal/service/s3/bucket_website_configuration.go index ef9c5445c13..6f112f6e237 100644 --- a/internal/service/s3/bucket_website_configuration.go +++ b/internal/service/s3/bucket_website_configuration.go @@ -236,7 +236,7 @@ func resourceBucketWebsiteConfigurationCreate(ctx context.Context, d *schema.Res }) if err != nil { - return diag.Errorf("waiting for S3 Bucket Accelerate Configuration (%s) create: %s", d.Id(), err) + return diag.Errorf("waiting for S3 Bucket Website Configuration (%s) create: %s", d.Id(), err) } return resourceBucketWebsiteConfigurationRead(ctx, d, meta) @@ -289,9 +289,9 @@ func resourceBucketWebsiteConfigurationRead(ctx context.Context, d *schema.Resou if output, err := findBucketLocation(ctx, conn, bucket, expectedBucketOwner); err != nil { return diag.Errorf("reading S3 Bucket (%s) Location: %s", d.Id(), err) } else { - website := WebsiteEndpoint(meta.(*conns.AWSClient), bucket, string(output.LocationConstraint)) - d.Set("website_domain", website.Domain) - d.Set("website_endpoint", 
website.Endpoint) + endpoint, domain := bucketWebsiteEndpointAndDomain(bucket, string(output.LocationConstraint)) + d.Set("website_domain", domain) + d.Set("website_endpoint", endpoint) } return nil @@ -391,7 +391,7 @@ func resourceBucketWebsiteConfigurationDelete(ctx context.Context, d *schema.Res }) if err != nil { - return diag.Errorf("waiting for S3 Bucket Accelerate Configuration (%s) delete: %s", d.Id(), err) + return diag.Errorf("waiting for S3 Bucket Website Configuration (%s) delete: %s", d.Id(), err) } return nil diff --git a/internal/service/s3/exports_test.go b/internal/service/s3/exports_test.go index 7f811f45450..5231c4aad71 100644 --- a/internal/service/s3/exports_test.go +++ b/internal/service/s3/exports_test.go @@ -7,6 +7,8 @@ package s3 var ( ResourceDirectoryBucket = newDirectoryBucketResource + BucketRegionalDomainName = bucketRegionalDomainName + BucketWebsiteEndpointAndDomain = bucketWebsiteEndpointAndDomain DeleteAllObjectVersions = deleteAllObjectVersions EmptyBucket = emptyBucket FindAnalyticsConfiguration = findAnalyticsConfiguration From 24cb0c329c5768361b210ed0f677bfbefedeef06 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Tue, 19 Dec 2023 10:43:53 -0500 Subject: [PATCH 348/438] r/aws_finspace_kx_dataview(test): add _tags test --- internal/service/finspace/kx_dataview.go | 12 +++ internal/service/finspace/kx_dataview_test.go | 95 +++++++++++++++++++ 2 files changed, 107 insertions(+) diff --git a/internal/service/finspace/kx_dataview.go b/internal/service/finspace/kx_dataview.go index 9618cd46bc3..84d90f267da 100644 --- a/internal/service/finspace/kx_dataview.go +++ b/internal/service/finspace/kx_dataview.go @@ -185,10 +185,22 @@ func resourceKxDataviewCreate(ctx context.Context, d *schema.ResourceData, meta if out == nil || out.DataviewName == nil { return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxDataview, d.Get("name").(string), errors.New("empty output")) } + if _, err := 
waitKxDataviewCreated(ctx, conn, d.Id(), d.Timeout(schema.TimeoutCreate)); err != nil { return create.AppendDiagError(diags, names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxDataview, d.Get("name").(string), err) } + // The CreateKxDataview API currently fails to tag the Dataview when the + // Tags field is set. Until the API is fixed, tag after creation instead. + // + // TODO: the identifier passed to createTags here likely needs to be an ARN, but this attribute + // is not returned from the create or describe APIs. The ARN may need to be manually constructed + // in order for tag after create to function. + // + // if err := createTags(ctx, conn, aws.ToString(out.DataviewName), getTagsIn(ctx)); err != nil { + // return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxDataview, d.Id(), err) + // } + return append(diags, resourceKxDataviewRead(ctx, d, meta)...) } diff --git a/internal/service/finspace/kx_dataview_test.go b/internal/service/finspace/kx_dataview_test.go index 0dda532a630..f3573ea0d99 100644 --- a/internal/service/finspace/kx_dataview_test.go +++ b/internal/service/finspace/kx_dataview_test.go @@ -88,6 +88,62 @@ func TestAccFinSpaceKxDataview_disappears(t *testing.T) { }) } +func TestAccFinSpaceKxDataview_tags(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + ctx := acctest.Context(t) + var dataview finspace.GetKxDataviewOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_finspace_kx_dataview.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { + acctest.PreCheck(ctx, t) + acctest.PreCheckPartitionHasService(t, finspace.ServiceID) + }, + ErrorCheck: acctest.ErrorCheck(t, finspace.ServiceID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckKxDataviewDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccKxDataviewConfig_tags1(rName, "key1", "value1"), + 
Check: resource.ComposeTestCheckFunc( + testAccCheckKxDataviewExists(ctx, resourceName, &dataview), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1"), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccKxDataviewConfig_tags2(rName, "key1", "value1updated", "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxDataviewExists(ctx, resourceName, &dataview), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "2"), + resource.TestCheckResourceAttr(resourceName, "tags.key1", "value1updated"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + { + Config: testAccKxDataviewConfig_tags1(rName, "key2", "value2"), + Check: resource.ComposeTestCheckFunc( + testAccCheckKxDataviewExists(ctx, resourceName, &dataview), + resource.TestCheckResourceAttr(resourceName, "name", rName), + resource.TestCheckResourceAttr(resourceName, "tags.%", "1"), + resource.TestCheckResourceAttr(resourceName, "tags.key2", "value2"), + ), + }, + }, + }) +} + func TestAccFinSpaceKxDataview_withKxVolume(t *testing.T) { if testing.Short() { t.Skip("skipping long-running test in short mode") @@ -201,6 +257,45 @@ resource "aws_finspace_kx_dataview" "test" { `, rName)) } +func testAccKxDataviewConfig_tags1(rName, key1, value1 string) string { + return acctest.ConfigCompose( + testAccKxDataviewConfigBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_dataview" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + database_name = aws_finspace_kx_database.test.name + auto_update = true + az_mode = "SINGLE" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + + tags = { + %[2]q = %[3]q + } +} +`, rName, key1, value1)) +} + 
+func testAccKxDataviewConfig_tags2(rName, key1, value1, key2, value2 string) string { + return acctest.ConfigCompose( + testAccKxDataviewConfigBase(rName), + fmt.Sprintf(` +resource "aws_finspace_kx_dataview" "test" { + name = %[1]q + environment_id = aws_finspace_kx_environment.test.id + database_name = aws_finspace_kx_database.test.name + auto_update = true + az_mode = "SINGLE" + availability_zone_id = aws_finspace_kx_environment.test.availability_zones[0] + + tags = { + %[2]q = %[3]q + %[4]q = %[5]q + } +} +`, rName, key1, value1, key2, value2)) +} + func testAccKxDataviewConfig_withKxVolume(rName string) string { return acctest.ConfigCompose( testAccKxDataviewConfigBase(rName), From ceb839f2e76416686a3f4535a55f1ce75908b05d Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Tue, 19 Dec 2023 16:01:09 -0500 Subject: [PATCH 349/438] r/aws_finspace_kx_dataview: compose syntheric arn on read Properly setting the arn attribute will allow transparent tagging to properly list tags on the underlying data view during read operations. This should fix persistent diffs when setting tags for this resource. 
--- .changelog/34998.txt | 3 ++ internal/service/finspace/kx_dataview.go | 43 ++++++++++++++---------- 2 files changed, 28 insertions(+), 18 deletions(-) create mode 100644 .changelog/34998.txt diff --git a/.changelog/34998.txt b/.changelog/34998.txt new file mode 100644 index 00000000000..8aa59933a7d --- /dev/null +++ b/.changelog/34998.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_finspace_kx_dataview: Properly set `arn` attribute on read, resolving persistent differences when `tags` are configured +``` diff --git a/internal/service/finspace/kx_dataview.go b/internal/service/finspace/kx_dataview.go index 84d90f267da..35dd16850d6 100644 --- a/internal/service/finspace/kx_dataview.go +++ b/internal/service/finspace/kx_dataview.go @@ -6,10 +6,12 @@ package finspace import ( "context" "errors" + "fmt" "log" "time" "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/arn" "github.com/aws/aws-sdk-go-v2/service/finspace" "github.com/aws/aws-sdk-go-v2/service/finspace/types" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -140,12 +142,15 @@ func resourceKxDataviewCreate(ctx context.Context, d *schema.ResourceData, meta var diags diag.Diagnostics conn := meta.(*conns.AWSClient).FinSpaceClient(ctx) + environmentID := d.Get("environment_id").(string) + databaseName := d.Get("database_name").(string) + name := d.Get("name").(string) + idParts := []string{ - d.Get("environment_id").(string), - d.Get("database_name").(string), - d.Get("name").(string), + environmentID, + databaseName, + name, } - rId, err := flex.FlattenResourceId(idParts, kxDataviewIdPartCount, false) if err != nil { return create.AppendDiagError(diags, names.FinSpace, create.ErrActionFlatteningResourceId, ResNameKxDataview, d.Get("name").(string), err) @@ -153,9 +158,9 @@ func resourceKxDataviewCreate(ctx context.Context, d *schema.ResourceData, meta d.SetId(rId) in := &finspace.CreateKxDataviewInput{ - DatabaseName: aws.String(d.Get("database_name").(string)), - 
DataviewName: aws.String(d.Get("name").(string)), - EnvironmentId: aws.String(d.Get("environment_id").(string)), + DatabaseName: aws.String(databaseName), + DataviewName: aws.String(name), + EnvironmentId: aws.String(environmentID), AutoUpdate: d.Get("auto_update").(bool), AzMode: types.KxAzMode(d.Get("az_mode").(string)), ClientToken: aws.String(id.UniqueId()), @@ -190,17 +195,6 @@ func resourceKxDataviewCreate(ctx context.Context, d *schema.ResourceData, meta return create.AppendDiagError(diags, names.FinSpace, create.ErrActionWaitingForCreation, ResNameKxDataview, d.Get("name").(string), err) } - // The CreateKxDataview API currently fails to tag the Dataview when the - // Tags field is set. Until the API is fixed, tag after creation instead. - // - // TODO: the identifier passed to createTags here likely needs to be an ARN, but this attribute - // is not returned from the create or describe APIs. The ARN may need to be manually constructed - // in order for tag after create to function. - // - // if err := createTags(ctx, conn, aws.ToString(out.DataviewName), getTagsIn(ctx)); err != nil { - // return create.AppendDiagError(diags, names.FinSpace, create.ErrActionCreating, ResNameKxDataview, d.Id(), err) - // } - return append(diags, resourceKxDataviewRead(ctx, d, meta)...) } @@ -233,6 +227,19 @@ func resourceKxDataviewRead(ctx context.Context, d *schema.ResourceData, meta in return create.AppendDiagError(diags, names.FinSpace, create.ErrActionReading, ResNameKxDataview, d.Id(), err) } + // Manually construct the dataview ARN, which is not returned from the + // Create or Describe APIs. 
+ // + // Ref: https://docs.aws.amazon.com/service-authorization/latest/reference/list_amazonfinspace.html#amazonfinspace-resources-for-iam-policies + dataviewARN := arn.ARN{ + Partition: meta.(*conns.AWSClient).Partition, + Region: meta.(*conns.AWSClient).Region, + Service: names.FinSpace, + AccountID: meta.(*conns.AWSClient).AccountID, + Resource: fmt.Sprintf("kxEnvironment/%s/kxDatabase/%s/kxDataview/%s", aws.ToString(out.EnvironmentId), aws.ToString(out.DatabaseName), aws.ToString(out.DataviewName)), + }.String() + d.Set("arn", dataviewARN) + return diags } From f487eae30f110d422919f79df8deeed85bf0cdf2 Mon Sep 17 00:00:00 2001 From: Jared Baker Date: Tue, 19 Dec 2023 16:02:40 -0500 Subject: [PATCH 350/438] doc: add aws_finspace_kx_dataview to skip_requesting_account_id impacted resource list --- website/docs/index.html.markdown | 1 + 1 file changed, 1 insertion(+) diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index 41aa4bb473c..3b32abc074a 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -413,6 +413,7 @@ In addition to [generic `provider` arguments](https://www.terraform.io/docs/conf - [`aws_elasticache_cluster` resource](/docs/providers/aws/r/elasticache_cluster.html) - [`aws_elb` data source](/docs/providers/aws/d/elb.html) - [`aws_elb` resource](/docs/providers/aws/r/elb.html) + - [`aws_finspace_kx_dataview` resource](/docs/providers/aws/r/finspace_kx_dataview.html) - [`aws_flow_log` resource](/docs/providers/aws/r/flow_log.html) - [`aws_glue_catalog_database` resource](/docs/providers/aws/r/glue_catalog_database.html) - [`aws_glue_catalog_table` resource](/docs/providers/aws/r/glue_catalog_table.html) From efc50f6155b220a41767c640219bd429821e3208 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 19 Dec 2023 16:30:09 -0500 Subject: [PATCH 351/438] r/aws_s3_bucket: Tidy up Read. 
--- internal/service/s3/bucket.go | 38 +++++++++++++++-------------------- 1 file changed, 16 insertions(+), 22 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 58768bed0f8..f4f7a0e646c 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -1118,6 +1118,10 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf d.Set("object_lock_enabled", nil) } + // + // Bucket Region etc. + // + region, err := manager.GetBucketRegion(ctx, conn, d.Id(), func(o *s3.Options) { o.UsePathStyle = meta.(*conns.AWSClient).S3UsePathStyle() }) @@ -1133,10 +1137,8 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf } d.Set("region", region) - d.Set("bucket_regional_domain_name", bucketRegionalDomainName(d.Id(), region)) - // Add the hosted zone ID for this bucket's region as an attribute hostedZoneID, err := hostedZoneIDForRegion(region) if err != nil { log.Printf("[WARN] %s", err) @@ -1150,36 +1152,28 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf d.Set("website_endpoint", endpoint) } - // Retry due to S3 eventual consistency - tagsRaw, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutRead), func() (interface{}, error) { + // + // Bucket Tags. + // + + tags, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() (tftags.KeyValueTags, error) { return BucketListTags(ctx, conn, d.Id()) - }, s3.ErrCodeNoSuchBucket) + }) - // The S3 API method calls above can occasionally return no error (i.e. NoSuchBucket) - // after a bucket has been deleted (eventual consistency woes :/), thus, when making extra S3 API calls - // such as GetBucketTagging, the error should be caught for non-new buckets as follows. 
- if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { + if !d.IsNewResource() && tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { log.Printf("[WARN] S3 Bucket (%s) not found, removing from state", d.Id()) d.SetId("") return diags } - if tfawserr.ErrCodeEquals(err, errCodeNotImplemented, errCodeXNotImplemented) { - return diags - } - - if err != nil { + switch { + case err == nil: + setTagsOut(ctx, Tags(tags)) + case tfawserr.ErrCodeEquals(err, errCodeNotImplemented, errCodeXNotImplemented): + default: return sdkdiag.AppendErrorf(diags, "listing tags for S3 Bucket (%s): %s", d.Id(), err) } - tags, ok := tagsRaw.(tftags.KeyValueTags) - - if !ok { - return sdkdiag.AppendErrorf(diags, "listing tags for S3 Bucket (%s): unable to convert tags", d.Id()) - } - - setTagsOut(ctx, Tags(tags)) - return diags } From da57f9ba48b044f34e4196fe03e0593883905244 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 19 Dec 2023 16:33:39 -0500 Subject: [PATCH 352/438] r/aws_s3_bucket: Tidy up Delete. 
--- internal/service/s3/bucket.go | 30 +++++++++++------------------- 1 file changed, 11 insertions(+), 19 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index f4f7a0e646c..b6e336c6339 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -1289,34 +1289,26 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta inte } func resourceBucketDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - conn := meta.(*conns.AWSClient).S3Conn(ctx) - connSDKv2 := meta.(*conns.AWSClient).S3Client(ctx) + var diags diag.Diagnostics + conn := meta.(*conns.AWSClient).S3Client(ctx) log.Printf("[INFO] Deleting S3 Bucket: %s", d.Id()) - _, err := conn.DeleteBucketWithContext(ctx, &s3.DeleteBucketInput{ + _, err := conn.DeleteBucket(ctx, &s3.DeleteBucketInput{ Bucket: aws.String(d.Id()), }) - if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) { - return nil + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { + return diags } if tfawserr.ErrCodeEquals(err, errCodeBucketNotEmpty) { if d.Get("force_destroy").(bool) { - // Use a S3 service client that can handle multiple slashes in URIs. - // While aws_s3_object resources cannot create these object - // keys, other AWS services and applications using the S3 Bucket can. - conn := meta.(*conns.AWSClient).S3Client(ctx) - - // bucket may have things delete them - log.Printf("[DEBUG] S3 Bucket attempting to forceDestroy %s", err) - // Delete everything including locked objects. // Don't ignore any object errors or we could recurse infinitely. 
var objectLockEnabled bool objectLockConfiguration := expandObjectLockConfiguration(d.Get("object_lock_configuration").([]interface{})) if objectLockConfiguration != nil { - objectLockEnabled = aws.StringValue(objectLockConfiguration.ObjectLockEnabled) == s3.ObjectLockEnabledEnabled + objectLockEnabled = objectLockConfiguration.ObjectLockEnabled == types.ObjectLockEnabledEnabled } if n, err := emptyBucket(ctx, conn, d.Id(), objectLockEnabled); err != nil { @@ -1325,24 +1317,24 @@ func resourceBucketDelete(ctx context.Context, d *schema.ResourceData, meta inte log.Printf("[DEBUG] Deleted %d S3 objects", n) } - // this line recurses until all objects are deleted or an error is returned + // Recurse until all objects are deleted or an error is returned return resourceBucketDelete(ctx, d, meta) } } if err != nil { - return create.DiagError(names.S3, create.ErrActionDeleting, resNameBucket, d.Id(), err) + return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket (%s): %s", d.Id(), err) } _, err = tfresource.RetryUntilNotFound(ctx, d.Timeout(schema.TimeoutDelete), func() (interface{}, error) { - return nil, findBucket(ctx, connSDKv2, d.Id()) + return nil, findBucket(ctx, conn, d.Id()) }) if err != nil { - return create.DiagError(names.S3, create.ErrActionWaitingForDeletion, resNameBucket, d.Id(), err) + return sdkdiag.AppendErrorf(diags, "waiting for S3 Bucket (%s) delete: %s", d.Id(), err) } - return nil + return diags } func findBucket(ctx context.Context, conn *s3.Client, bucket string, optFns ...func(*s3.Options)) error { From 854f61b41ae0fd97b80a59113f27930e4b292040 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Tue, 19 Dec 2023 17:03:18 -0500 Subject: [PATCH 353/438] r/aws_s3_bucket: Start to tidy up Update. 
--- internal/service/s3/bucket.go | 362 +++++++++++++++------------------- 1 file changed, 160 insertions(+), 202 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index b6e336c6339..4f80e62e66e 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -877,7 +877,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf switch { case err == nil: - if err := d.Set("cors_rule", flattenBucketCorsRules(corsRules)); err != nil { + if err := d.Set("cors_rule", flattenBucketCORSRules(corsRules)); err != nil { return sdkdiag.AppendErrorf(diags, "setting cors_rule: %s", err) } case tfawserr.ErrCodeEquals(err, errCodeNoSuchCORSConfiguration, errCodeNotImplemented, errCodeXNotImplemented): @@ -1150,6 +1150,9 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf endpoint, domain := bucketWebsiteEndpointAndDomain(d.Id(), region) d.Set("website_domain", domain) d.Set("website_endpoint", endpoint) + } else { + d.Set("website_domain", nil) + d.Set("website_endpoint", nil) } // @@ -1179,38 +1182,100 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics - conn := meta.(*conns.AWSClient).S3Conn(ctx) + conn := meta.(*conns.AWSClient).S3Client(ctx) - if d.HasChange("tags_all") { - o, n := d.GetChange("tags_all") + // Note: Order of argument updates below is important - // Retry due to S3 eventual consistency - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { - terr := BucketUpdateTags(ctx, conn, d.Id(), o, n) - return nil, terr - }, s3.ErrCodeNoSuchBucket) + if d.HasChange("policy") { + policy, err := structure.NormalizeJsonString(d.Get("policy").(string)) if err != nil { - return sdkdiag.AppendErrorf(diags, "updating S3 Bucket (%s) tags: 
%s", d.Id(), err) + return sdkdiag.AppendFromErr(diags, err) } - } - // Note: Order of argument updates below is important + if policy == "" { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + return conn.DeleteBucketPolicy(ctx, &s3.DeleteBucketPolicyInput{ + Bucket: aws.String(d.Id()), + }) + }, errCodeNoSuchBucket) - if d.HasChange("policy") { - if err := resourceBucketInternalPolicyUpdate(ctx, conn, d); err != nil { - return sdkdiag.AppendErrorf(diags, "updating S3 Bucket (%s) Policy: %s", d.Id(), err) + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket (%s) policy: %s", d.Id(), err) + } + } else { + input := &s3.PutBucketPolicyInput{ + Bucket: aws.String(d.Id()), + Policy: aws.String(policy), + } + + _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + return conn.PutBucketPolicy(ctx, input) + }, errCodeMalformedPolicy, errCodeNoSuchBucket) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "putting S3 Bucket (%s) policy: %s", d.Id(), err) + } } } if d.HasChange("cors_rule") { - if err := resourceBucketInternalCorsUpdate(ctx, conn, d); err != nil { - return sdkdiag.AppendErrorf(diags, "updating S3 Bucket (%s) CORS Rules: %s", d.Id(), err) + if v, ok := d.GetOk("cors_rule"); ok && len(v.([]interface{})) == 0 { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + return conn.DeleteBucketCors(ctx, &s3.DeleteBucketCorsInput{ + Bucket: aws.String(d.Id()), + }) + }, errCodeNoSuchBucket) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket (%s) CORS configuration: %s", d.Id(), err) + } + } else { + input := &s3.PutBucketCorsInput{ + Bucket: aws.String(d.Id()), + CORSConfiguration: &types.CORSConfiguration{ + CORSRules: expandCORSRules(d.Get("cors_rule").(*schema.Set).List()), + }, + } + + _, err := 
tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + return conn.PutBucketCors(ctx, input) + }, errCodeNoSuchBucket) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "putting S3 Bucket (%s) CORS configuration: %s", d.Id(), err) + } } } if d.HasChange("website") { - if err := resourceBucketInternalWebsiteUpdate(ctx, conn, d); err != nil { - return sdkdiag.AppendErrorf(diags, "updating S3 Bucket (%s) Website: %s", d.Id(), err) + if v, ok := d.GetOk("website"); ok && len(v.([]interface{})) == 0 || v.([]interface{})[0] == nil { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + return conn.DeleteBucketWebsite(ctx, &s3.DeleteBucketWebsiteInput{ + Bucket: aws.String(d.Id()), + }) + }, errCodeNoSuchBucket) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket (%s) website configuration: %s", d.Id(), err) + } + } else { + websiteConfig, err := expandBucketWebsiteConfiguration(v.([]interface{})[0].(map[string]interface{})) + if err != nil { + return sdkdiag.AppendFromErr(diags, err) + } + + input := &s3.PutBucketWebsiteInput{ + Bucket: aws.String(d.Id()), + WebsiteConfiguration: websiteConfig, + } + + _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + return conn.PutBucketWebsite(ctx, input) + }, errCodeNoSuchBucket) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "putting S3 Bucket (%s) website configuration: %s", d.Id(), err) + } } } @@ -1285,6 +1350,20 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta inte } } + if d.HasChange("tags_all") { + o, n := d.GetChange("tags_all") + + // Retry due to S3 eventual consistency. 
+ _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + terr := BucketUpdateTags(ctx, conn, d.Id(), o, n) + return nil, terr + }, errCodeNoSuchBucket) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "updating S3 Bucket (%s) tags: %s", d.Id(), err) + } + } + return append(diags, resourceBucketRead(ctx, d, meta)...) } @@ -1317,7 +1396,7 @@ func resourceBucketDelete(ctx context.Context, d *schema.ResourceData, meta inte log.Printf("[DEBUG] Deleted %d S3 objects", n) } - // Recurse until all objects are deleted or an error is returned + // Recurse until all objects are deleted or an error is returned. return resourceBucketDelete(ctx, d, meta) } } @@ -1447,72 +1526,6 @@ func resourceBucketInternalACLUpdate(ctx context.Context, conn *s3.S3, d *schema return err } -func resourceBucketInternalCorsUpdate(ctx context.Context, conn *s3.S3, d *schema.ResourceData) error { - rawCors := d.Get("cors_rule").([]interface{}) - - if len(rawCors) == 0 { - // Delete CORS - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { - return conn.DeleteBucketCorsWithContext(ctx, &s3.DeleteBucketCorsInput{ - Bucket: aws.String(d.Id()), - }) - }, s3.ErrCodeNoSuchBucket) - - if err != nil { - return fmt.Errorf("deleting S3 Bucket (%s) CORS: %w", d.Id(), err) - } - - return nil - } - // Put CORS - rules := make([]*s3.CORSRule, 0, len(rawCors)) - for _, cors := range rawCors { - // Prevent panic - // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/7546 - corsMap, ok := cors.(map[string]interface{}) - if !ok { - continue - } - r := &s3.CORSRule{} - for k, v := range corsMap { - if k == "max_age_seconds" { - r.MaxAgeSeconds = aws.Int64(int64(v.(int))) - } else { - vMap := make([]*string, len(v.([]interface{}))) - for i, vv := range v.([]interface{}) { - if str, ok := vv.(string); ok { - vMap[i] = aws.String(str) - } - } - switch k { - case 
"allowed_headers": - r.AllowedHeaders = vMap - case "allowed_methods": - r.AllowedMethods = vMap - case "allowed_origins": - r.AllowedOrigins = vMap - case "expose_headers": - r.ExposeHeaders = vMap - } - } - } - rules = append(rules, r) - } - - input := &s3.PutBucketCorsInput{ - Bucket: aws.String(d.Id()), - CORSConfiguration: &s3.CORSConfiguration{ - CORSRules: rules, - }, - } - - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { - return conn.PutBucketCorsWithContext(ctx, input) - }, s3.ErrCodeNoSuchBucket) - - return err -} - func resourceBucketInternalGrantsUpdate(ctx context.Context, conn *s3.S3, d *schema.ResourceData) error { grants := d.Get("grant").(*schema.Set) @@ -1757,49 +1770,6 @@ func resourceBucketInternalObjectLockConfigurationUpdate(ctx context.Context, co return err } -func resourceBucketInternalPolicyUpdate(ctx context.Context, conn *s3.S3, d *schema.ResourceData) error { - policy, err := structure.NormalizeJsonString(d.Get("policy").(string)) - if err != nil { - return fmt.Errorf("policy (%s) is an invalid JSON: %w", policy, err) - } - - if policy == "" { - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { - return conn.DeleteBucketPolicyWithContext(ctx, &s3.DeleteBucketPolicyInput{ - Bucket: aws.String(d.Id()), - }) - }, s3.ErrCodeNoSuchBucket) - - if err != nil { - return fmt.Errorf("deleting S3 Bucket (%s) policy: %w", d.Id(), err) - } - - return nil - } - - params := &s3.PutBucketPolicyInput{ - Bucket: aws.String(d.Id()), - Policy: aws.String(policy), - } - - err = retry.RetryContext(ctx, d.Timeout(schema.TimeoutUpdate), func() *retry.RetryError { - _, err := conn.PutBucketPolicyWithContext(ctx, params) - if tfawserr.ErrCodeEquals(err, errCodeMalformedPolicy, s3.ErrCodeNoSuchBucket) { - return retry.RetryableError(err) - } - if err != nil { - return retry.NonRetryableError(err) - } - return nil - }) - - if 
tfresource.TimedOut(err) { - _, err = conn.PutBucketPolicyWithContext(ctx, params) - } - - return err -} - func resourceBucketInternalReplicationConfigurationUpdate(ctx context.Context, conn *s3.S3, d *schema.ResourceData) error { replicationConfiguration := d.Get("replication_configuration").([]interface{}) @@ -1947,77 +1917,73 @@ func resourceBucketInternalVersioningUpdate(ctx context.Context, conn *s3.S3, bu return err } -func resourceBucketInternalWebsiteUpdate(ctx context.Context, conn *s3.S3, d *schema.ResourceData) error { - ws := d.Get("website").([]interface{}) +///////////////////////////////////////////// Expand and Flatten functions ///////////////////////////////////////////// - if len(ws) == 0 { - input := &s3.DeleteBucketWebsiteInput{ - Bucket: aws.String(d.Id()), - } +// Cors Rule functions - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { - return conn.DeleteBucketWebsiteWithContext(ctx, input) - }, s3.ErrCodeNoSuchBucket) +func expandBucketCORSRules(l []interface{}) []types.CORSRule { + if len(l) == 0 { + return nil + } - if err != nil { - return fmt.Errorf("deleting S3 Bucket (%s) Website: %w", d.Id(), err) + var rules []types.CORSRule + + for _, tfMapRaw := range l { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { + continue } - d.Set("website_endpoint", "") - d.Set("website_domain", "") + rule := types.CORSRule{} - return nil - } + if v, ok := tfMap["allowed_headers"].([]interface{}); ok && len(v) > 0 { + rule.AllowedHeaders = flex.ExpandStringValueList(v) + } - websiteConfig, err := expandWebsiteConfiguration(ws) - if err != nil { - return fmt.Errorf("expanding S3 Bucket (%s) website configuration: %w", d.Id(), err) - } + if v, ok := tfMap["allowed_methods"].([]interface{}); ok && len(v) > 0 { + rule.AllowedMethods = flex.ExpandStringValueList(v) + } - input := &s3.PutBucketWebsiteInput{ - Bucket: aws.String(d.Id()), - WebsiteConfiguration: websiteConfig, - } + if 
v, ok := tfMap["allowed_origins"].([]interface{}); ok && len(v) > 0 { + rule.AllowedOrigins = flex.ExpandStringValueList(v) + } - _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { - return conn.PutBucketWebsiteWithContext(ctx, input) - }, s3.ErrCodeNoSuchBucket) + if v, ok := tfMap["expose_headers"].([]interface{}); ok && len(v) > 0 { + rule.ExposeHeaders = flex.ExpandStringValueList(v) + } - return err -} + if v, ok := tfMap["max_age_seconds"].(int); ok { + rule.MaxAgeSeconds = aws.Int32(int32(v)) + } -///////////////////////////////////////////// Expand and Flatten functions ///////////////////////////////////////////// + rules = append(rules, rule) + } -// Cors Rule functions + return rules +} -func flattenBucketCorsRules(rules []*s3.CORSRule) []interface{} { +func flattenBucketCORSRules(rules []types.CORSRule) []interface{} { var results []interface{} for _, rule := range rules { - if rule == nil { - continue + m := map[string]interface{}{ + "max_age_seconds": rule.MaxAgeSeconds, } - m := make(map[string]interface{}) - if len(rule.AllowedHeaders) > 0 { - m["allowed_headers"] = flex.FlattenStringList(rule.AllowedHeaders) + m["allowed_headers"] = rule.AllowedHeaders } if len(rule.AllowedMethods) > 0 { - m["allowed_methods"] = flex.FlattenStringList(rule.AllowedMethods) + m["allowed_methods"] = rule.AllowedMethods } if len(rule.AllowedOrigins) > 0 { - m["allowed_origins"] = flex.FlattenStringList(rule.AllowedOrigins) + m["allowed_origins"] = rule.AllowedOrigins } if len(rule.ExposeHeaders) > 0 { - m["expose_headers"] = flex.FlattenStringList(rule.ExposeHeaders) - } - - if rule.MaxAgeSeconds != nil { - m["max_age_seconds"] = int(aws.Int64Value(rule.MaxAgeSeconds)) + m["expose_headers"] = rule.ExposeHeaders } results = append(results, m) @@ -2842,62 +2808,54 @@ func flattenVersioning(versioning *s3.GetBucketVersioningOutput) []interface{} { // Website functions -func expandWebsiteConfiguration(l 
[]interface{}) (*s3.WebsiteConfiguration, error) { - if len(l) == 0 || l[0] == nil { - return nil, nil - } +func expandBucketWebsiteConfiguration(tfMap map[string]interface{}) (*types.WebsiteConfiguration, error) { + websiteConfig := &types.WebsiteConfiguration{} - website, ok := l[0].(map[string]interface{}) - if !ok { - return nil, nil - } - - websiteConfiguration := &s3.WebsiteConfiguration{} - - if v, ok := website["index_document"].(string); ok && v != "" { - websiteConfiguration.IndexDocument = &s3.IndexDocument{ + if v, ok := tfMap["index_document"].(string); ok && v != "" { + websiteConfig.IndexDocument = &types.IndexDocument{ Suffix: aws.String(v), } } - if v, ok := website["error_document"].(string); ok && v != "" { - websiteConfiguration.ErrorDocument = &s3.ErrorDocument{ + if v, ok := tfMap["error_document"].(string); ok && v != "" { + websiteConfig.ErrorDocument = &types.ErrorDocument{ Key: aws.String(v), } } - if v, ok := website["redirect_all_requests_to"].(string); ok && v != "" { + if v, ok := tfMap["redirect_all_requests_to"].(string); ok && v != "" { redirect, err := url.Parse(v) if err == nil && redirect.Scheme != "" { - var redirectHostBuf bytes.Buffer - redirectHostBuf.WriteString(redirect.Host) + var buf bytes.Buffer + + buf.WriteString(redirect.Host) if redirect.Path != "" { - redirectHostBuf.WriteString(redirect.Path) + buf.WriteString(redirect.Path) } if redirect.RawQuery != "" { - redirectHostBuf.WriteString("?") - redirectHostBuf.WriteString(redirect.RawQuery) + buf.WriteString("?") + buf.WriteString(redirect.RawQuery) } - websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{ - HostName: aws.String(redirectHostBuf.String()), - Protocol: aws.String(redirect.Scheme), + websiteConfig.RedirectAllRequestsTo = &types.RedirectAllRequestsTo{ + HostName: aws.String(buf.String()), + Protocol: types.Protocol(redirect.Scheme), } } else { - websiteConfiguration.RedirectAllRequestsTo = &s3.RedirectAllRequestsTo{ + 
websiteConfig.RedirectAllRequestsTo = &types.RedirectAllRequestsTo{ HostName: aws.String(v), } } } - if v, ok := website["routing_rules"].(string); ok && v != "" { - var unmarshaledRules []*s3.RoutingRule - if err := json.Unmarshal([]byte(v), &unmarshaledRules); err != nil { + if v, ok := tfMap["routing_rules"].(string); ok && v != "" { + var routingRules []types.RoutingRule + if err := json.Unmarshal([]byte(v), &routingRules); err != nil { return nil, err } - websiteConfiguration.RoutingRules = unmarshaledRules + websiteConfig.RoutingRules = routingRules } - return websiteConfiguration, nil + return websiteConfig, nil } func flattenBucketWebsite(ws *s3.GetBucketWebsiteOutput) ([]interface{}, error) { From 009a40de6853f9698f8daa74bdcf826c74437599 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 19 Dec 2023 17:01:28 -0800 Subject: [PATCH 354/438] Adds package `names/data` for accessing service data --- docs/add-a-new-service.md | 6 +- internal/generate/allowsubcats/main.go | 14 +-- internal/generate/awsclient/main.go | 14 +-- internal/generate/checknames/main.go | 112 +++++++++--------- internal/generate/issuelabels/main.go | 14 +-- internal/generate/prlabels/main.go | 14 +-- internal/generate/servicelabels/main.go | 14 +-- internal/generate/servicepackage/main.go | 14 +-- internal/generate/servicepackages/main.go | 14 +-- internal/generate/servicesemgrep/main.go | 12 +- internal/generate/sweeperregistration/main.go | 14 +-- internal/generate/teamcity/README.md | 2 +- internal/generate/teamcity/services.go | 12 +- names/README.md | 12 +- names/columns.go | 4 +- names/{ => data}/names_data.csv | 2 +- names/data/read.go | 37 ++++++ names/names.go | 24 ++-- 18 files changed, 158 insertions(+), 177 deletions(-) rename names/{ => data}/names_data.csv (99%) create mode 100644 names/data/read.go diff --git a/docs/add-a-new-service.md b/docs/add-a-new-service.md index 01e3c37ff19..6dd33d9c0ac 100644 --- a/docs/add-a-new-service.md +++ b/docs/add-a-new-service.md @@ 
-19,7 +19,7 @@ Before new resources are submitted, please raise a separate pull request contain To add an AWS SDK for Go service client: -1. Check the file `names/names_data.csv` for the service. +1. Check the file `names/data/names_data.csv` for the service. 1. If the service is there and there is no value in the `NotImplmented` column, you are ready to implement the first [resource](./add-a-new-resource.md) or [data source](./add-a-new-datasource.md). @@ -27,7 +27,7 @@ To add an AWS SDK for Go service client: 1. Otherwise, determine the service identifier using the rule described in [the Naming Guide](naming.md#service-identifier). -1. In `names/names_data.csv`, add a new line with all the requested information for the service following the guidance in the [`names` README](https://github.com/hashicorp/terraform-provider-aws/blob/main/names/README.md). +1. In `names/data/names_data.csv`, add a new line with all the requested information for the service following the guidance in the [`names` README](https://github.com/hashicorp/terraform-provider-aws/blob/main/names/README.md). !!! tip Be very careful when adding or changing data in `names_data.csv`! @@ -70,7 +70,7 @@ Once the service client has been added, implement the first [resource](./add-a-n If an AWS service must be created in a non-standard way, for example the service API's endpoint must be accessed via a single AWS Region, then: -1. Add an `x` in the **SkipClientGenerate** column for the service in [`names/names_data.csv`](https://github.com/hashicorp/terraform-provider-aws/blob/main/names/README.md) +1. Add an `x` in the **SkipClientGenerate** column for the service in [`names/data/names_data.csv`](https://github.com/hashicorp/terraform-provider-aws/blob/main/names/data/README.md) 1. 
Run `make gen` diff --git a/internal/generate/allowsubcats/main.go b/internal/generate/allowsubcats/main.go index 07ed72688e4..1abedd0d3da 100644 --- a/internal/generate/allowsubcats/main.go +++ b/internal/generate/allowsubcats/main.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/generate/common" "github.com/hashicorp/terraform-provider-aws/names" + "github.com/hashicorp/terraform-provider-aws/names/data" ) type ServiceDatum struct { @@ -25,26 +26,21 @@ type TemplateData struct { func main() { const ( - filename = `../../../website/allowed-subcategories.txt` - namesDataFile = "../../../names/names_data.csv" + filename = `../../../website/allowed-subcategories.txt` ) g := common.NewGenerator() g.Infof("Generating %s", strings.TrimPrefix(filename, "../../../")) - data, err := common.ReadAllCSVData(namesDataFile) + data, err := data.ReadAllServiceData() if err != nil { - g.Fatalf("error reading %s: %s", namesDataFile, err) + g.Fatalf("error reading service data: %s", err) } td := TemplateData{} - for i, l := range data { - if i < 1 { // no header - continue - } - + for _, l := range data { if l[names.ColExclude] != "" && l[names.ColAllowedSubcategory] == "" { continue } diff --git a/internal/generate/awsclient/main.go b/internal/generate/awsclient/main.go index 7090bc43c6c..fdd21a9d1fd 100644 --- a/internal/generate/awsclient/main.go +++ b/internal/generate/awsclient/main.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/generate/common" "github.com/hashicorp/terraform-provider-aws/names" + "github.com/hashicorp/terraform-provider-aws/names/data" ) type ServiceDatum struct { @@ -28,26 +29,21 @@ type TemplateData struct { func main() { const ( - filename = `awsclient_gen.go` - namesDataFile = "../../names/names_data.csv" + filename = `awsclient_gen.go` ) g := common.NewGenerator() g.Infof("Generating internal/conns/%s", filename) - data, err := common.ReadAllCSVData(namesDataFile) + data, err := 
data.ReadAllServiceData() if err != nil { - g.Fatalf("error reading %s: %s", namesDataFile, err) + g.Fatalf("error reading service data: %s", err) } td := TemplateData{} - for i, l := range data { - if i < 1 { // skip header - continue - } - + for _, l := range data { if l[names.ColExclude] != "" { continue } diff --git a/internal/generate/checknames/main.go b/internal/generate/checknames/main.go index ec5508777af..886654c9a26 100644 --- a/internal/generate/checknames/main.go +++ b/internal/generate/checknames/main.go @@ -8,7 +8,6 @@ package main import ( "bufio" - "encoding/csv" "fmt" "io/ioutil" "log" @@ -19,9 +18,12 @@ import ( multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-provider-aws/names" + "github.com/hashicorp/terraform-provider-aws/names/data" ) -const namesDataFile = "../../../names/names_data.csv" +const ( + lineOffset = 2 // 1 for skipping header line + 1 to translate from 0-based to 1-based index +) // DocPrefix tests/column needs to be reworked for compatibility with tfproviderdocs type DocPrefix struct { @@ -34,31 +36,19 @@ var allDocs int // currently skipping this test var allChecks int func main() { - fmt.Printf("Checking %s\n", strings.TrimPrefix(namesDataFile, "../../../")) - - f, err := os.Open(namesDataFile) - if err != nil { - log.Fatal(err) - } - - defer f.Close() + fmt.Println("Checking service data") - csvReader := csv.NewReader(f) + data, err := data.ReadAllServiceData() - data, err := csvReader.ReadAll() if err != nil { - log.Fatal(err) + log.Fatalf("error reading service data: %s", err) } docPrefixes := []DocPrefix{} // test to be reworked for i, l := range data { - if i < 1 { // no header - continue - } - if l[names.ColHumanFriendly] == "" { - log.Fatalf("in names_data.csv line %d, HumanFriendly cannot be blank", i+1) + log.Fatalf("in service data, line %d, HumanFriendly cannot be blank", i+lineOffset) } // TODO: Check for duplicates in HumanFriendly, ProviderPackageActual, @@ -66,33 +56,33 @@ func 
main() { // ResourcePrefixActual, ResourcePrefixCorrect, FilePrefix, DocPrefix if l[names.ColAWSCLIV2Command] != "" && strings.Replace(l[names.ColAWSCLIV2Command], "-", "", -1) != l[names.ColAWSCLIV2CommandNoDashes] { - log.Fatalf("in names_data.csv, for service %s, AWSCLIV2CommandNoDashes must be the same as AWSCLIV2Command without dashes (%s)", l[names.ColHumanFriendly], strings.Replace(l[names.ColAWSCLIV2Command], "-", "", -1)) + log.Fatalf("in service data, line %d, for service %s, AWSCLIV2CommandNoDashes must be the same as AWSCLIV2Command without dashes (%s)", i, l[names.ColHumanFriendly], strings.Replace(l[names.ColAWSCLIV2Command], "-", "", -1)) } if l[names.ColProviderPackageCorrect] != "" && l[names.ColAWSCLIV2CommandNoDashes] != "" && l[names.ColGoV2Package] != "" { if len(l[names.ColAWSCLIV2CommandNoDashes]) < len(l[names.ColGoV2Package]) && l[names.ColProviderPackageCorrect] != l[names.ColAWSCLIV2CommandNoDashes] { - log.Fatalf("in names_data.csv, for service %s, ProviderPackageCorrect must be shorter of AWSCLIV2CommandNoDashes (%s) and GoV2Package (%s)", l[names.ColHumanFriendly], l[names.ColAWSCLIV2CommandNoDashes], l[names.ColGoV2Package]) + log.Fatalf("in service data, line %d, for service %s, ProviderPackageCorrect must be shorter of AWSCLIV2CommandNoDashes (%s) and GoV2Package (%s)", i, l[names.ColHumanFriendly], l[names.ColAWSCLIV2CommandNoDashes], l[names.ColGoV2Package]) } if len(l[names.ColAWSCLIV2CommandNoDashes]) > len(l[names.ColGoV2Package]) && l[names.ColProviderPackageCorrect] != l[names.ColGoV2Package] { - log.Fatalf("in names_data.csv, for service %s, ProviderPackageCorrect must be shorter of AWSCLIV2CommandNoDashes (%s) and GoV2Package (%s)", l[names.ColHumanFriendly], l[names.ColAWSCLIV2CommandNoDashes], l[names.ColGoV2Package]) + log.Fatalf("in service data, line %d, for service %s, ProviderPackageCorrect must be shorter of AWSCLIV2CommandNoDashes (%s) and GoV2Package (%s)", i, l[names.ColHumanFriendly], 
l[names.ColAWSCLIV2CommandNoDashes], l[names.ColGoV2Package]) } } if l[names.ColAWSCLIV2CommandNoDashes] == "" && l[names.ColGoV2Package] == "" && l[names.ColExclude] == "" { - log.Fatalf("in names_data.csv, for service %s, if Exclude is blank, either AWSCLIV2CommandNoDashes or GoV2Package must have values", l[names.ColHumanFriendly]) + log.Fatalf("in service data, line %d, for service %s, if Exclude is blank, either AWSCLIV2CommandNoDashes or GoV2Package must have values", i, l[names.ColHumanFriendly]) } if l[names.ColProviderPackageActual] != "" && l[names.ColProviderPackageCorrect] == "" { - log.Fatalf("in names_data.csv, for service %s, ProviderPackageActual can't be non-blank if ProviderPackageCorrect is blank", l[names.ColHumanFriendly]) + log.Fatalf("in service data, line %d, for service %s, ProviderPackageActual can't be non-blank if ProviderPackageCorrect is blank", i, l[names.ColHumanFriendly]) } if l[names.ColProviderPackageActual] == "" && l[names.ColProviderPackageCorrect] == "" && l[names.ColExclude] == "" { - log.Fatalf("in names_data.csv, for service %s, ProviderPackageActual and ProviderPackageCorrect cannot both be blank unless Exclude is non-blank", l[names.ColHumanFriendly]) + log.Fatalf("in service data, line %d, for service %s, ProviderPackageActual and ProviderPackageCorrect cannot both be blank unless Exclude is non-blank", i, l[names.ColHumanFriendly]) } if l[names.ColProviderPackageCorrect] != "" && l[names.ColProviderPackageActual] == l[names.ColProviderPackageCorrect] { - log.Fatalf("in names_data.csv, for service %s, ProviderPackageActual should only be used if different from ProviderPackageCorrect", l[names.ColHumanFriendly]) + log.Fatalf("in service data, line %d, for service %s, ProviderPackageActual should only be used if different from ProviderPackageCorrect", i, l[names.ColHumanFriendly]) } packageToUse := l[names.ColProviderPackageCorrect] @@ -106,76 +96,82 @@ func main() { for _, v := range p { if v == packageToUse { - 
log.Fatalf("in names_data.csv, for service %s, Aliases should not include ProviderPackageActual, if not blank, or ProviderPackageCorrect, if not blank and ProviderPackageActual is blank", l[names.ColHumanFriendly]) + log.Fatalf("in service data, line %d, for service %s, Aliases should not include ProviderPackageActual, if not blank, or ProviderPackageCorrect, if not blank and ProviderPackageActual is blank", i, l[names.ColHumanFriendly]) } } } if l[names.ColClientSDKV1] == "" && l[names.ColClientSDKV2] == "" && l[names.ColExclude] == "" { - log.Fatalf("in names_data.csv, for service %s, at least one of ClientSDKV1 or ClientSDKV2 must have a value if Exclude is blank", l[names.ColHumanFriendly]) + log.Fatalf("in service data, line %d, for service %s, at least one of ClientSDKV1 or ClientSDKV2 must have a value if Exclude is blank", i, l[names.ColHumanFriendly]) } if l[names.ColClientSDKV1] != "" && (l[names.ColGoV1Package] == "" || l[names.ColGoV1ClientTypeName] == "") { - log.Fatalf("in names_data.csv, for service %s, SDKVersion is set to 1 so neither GoV1Package nor GoV1ClientTypeName can be blank", l[names.ColHumanFriendly]) + log.Fatalf("in service data, line %d, for service %s, SDKVersion is set to 1 so neither GoV1Package nor GoV1ClientTypeName can be blank", i, l[names.ColHumanFriendly]) } if l[names.ColClientSDKV2] != "" && l[names.ColGoV2Package] == "" { - log.Fatalf("in names_data.csv, for service %s, SDKVersion is set to 2 so GoV2Package cannot be blank", l[names.ColHumanFriendly]) + log.Fatalf("in service data, line %d, for service %s, SDKVersion is set to 2 so GoV2Package cannot be blank", i, l[names.ColHumanFriendly]) } if l[names.ColResourcePrefixCorrect] != "" && l[names.ColResourcePrefixCorrect] != fmt.Sprintf("aws_%s_", l[names.ColProviderPackageCorrect]) { - log.Fatalf("in names_data.csv, for service %s, ResourcePrefixCorrect should be aws__, where is ProviderPackageCorrect", l[names.ColHumanFriendly]) + log.Fatalf("in service data, line %d, for 
service %s, ResourcePrefixCorrect should be aws__, where is ProviderPackageCorrect", i, l[names.ColHumanFriendly]) } if l[names.ColResourcePrefixCorrect] != "" && l[names.ColResourcePrefixActual] == l[names.ColResourcePrefixCorrect] { - log.Fatalf("in names_data.csv, for service %s, ResourcePrefixActual should not be the same as ResourcePrefixCorrect, set ResourcePrefixActual to blank", l[names.ColHumanFriendly]) + log.Fatalf("in service data, line %d, for service %s, ResourcePrefixActual should not be the same as ResourcePrefixCorrect, set ResourcePrefixActual to blank", i, l[names.ColHumanFriendly]) } if l[names.ColSplitPackageRealPackage] != "" && (l[names.ColProviderPackageCorrect] == "" || l[names.ColFilePrefix] == "" || l[names.ColResourcePrefixActual] == "") { - log.Fatalf("in names_data.csv, for service %s, if SplitPackageRealPackage has a value, ProviderPackageCorrect, ResourcePrefixActual and FilePrefix must have values", l[names.ColHumanFriendly]) + log.Fatalf("in service data, line %d, for service %s, if SplitPackageRealPackage has a value, ProviderPackageCorrect, ResourcePrefixActual and FilePrefix must have values", i, l[names.ColHumanFriendly]) } if l[names.ColSplitPackageRealPackage] == "" && l[names.ColFilePrefix] != "" { - log.Fatalf("in names_data.csv, for service %s, if SplitPackageRealPackge is blank, FilePrefix must also be blank", l[names.ColHumanFriendly]) + log.Fatalf("in service data, line %d, for service %s, if SplitPackageRealPackge is blank, FilePrefix must also be blank", i, l[names.ColHumanFriendly]) } if l[names.ColBrand] != "AWS" && l[names.ColBrand] != "Amazon" && l[names.ColBrand] != "" { - log.Fatalf("in names_data.csv, for service %s, Brand must be AWS, Amazon, or blank; found %s", l[names.ColHumanFriendly], l[names.ColBrand]) + log.Fatalf("in service data, line %d, for service %s, Brand must be AWS, Amazon, or blank; found %s", l[names.ColHumanFriendly], i, l[names.ColBrand]) } if (l[names.ColExclude] == "" || 
(l[names.ColExclude] != "" && l[names.ColAllowedSubcategory] != "")) && l[names.ColDocPrefix] == "" { - log.Fatalf("in names_data.csv, for service %s, DocPrefix cannot be blank unless Exclude is non-blank and AllowedSubcategory is blank", l[names.ColHumanFriendly]) + log.Fatalf("in service data, line %d, for service %s, DocPrefix cannot be blank unless Exclude is non-blank and AllowedSubcategory is blank", i, l[names.ColHumanFriendly]) } - checkAllLowercase(l[names.ColHumanFriendly], "AWSCLIV2Command", l[names.ColAWSCLIV2Command]) - checkAllLowercase(l[names.ColHumanFriendly], "AWSCLIV2CommandNoDashes", l[names.ColAWSCLIV2CommandNoDashes]) - checkAllLowercase(l[names.ColHumanFriendly], "GoV1Package", l[names.ColGoV1Package]) - checkAllLowercase(l[names.ColHumanFriendly], "GoV2Package", l[names.ColGoV2Package]) - checkAllLowercase(l[names.ColHumanFriendly], "ProviderPackageActual", l[names.ColProviderPackageActual]) - checkAllLowercase(l[names.ColHumanFriendly], "ProviderPackageCorrect", l[names.ColProviderPackageCorrect]) - checkAllLowercase(l[names.ColHumanFriendly], "SplitPackageRealPackage", l[names.ColSplitPackageRealPackage]) - checkAllLowercase(l[names.ColHumanFriendly], "Aliases", l[names.ColAliases]) - checkAllLowercase(l[names.ColHumanFriendly], "ResourcePrefixActual", l[names.ColResourcePrefixActual]) - checkAllLowercase(l[names.ColHumanFriendly], "ResourcePrefixCorrect", l[names.ColResourcePrefixCorrect]) - checkAllLowercase(l[names.ColHumanFriendly], "FilePrefix", l[names.ColFilePrefix]) - checkAllLowercase(l[names.ColHumanFriendly], "DocPrefix", l[names.ColDocPrefix]) - - checkNotAllLowercase(l[names.ColHumanFriendly], "ProviderNameUpper", l[names.ColProviderNameUpper]) - checkNotAllLowercase(l[names.ColHumanFriendly], "GoV1ClientTypeName", l[names.ColGoV1ClientTypeName]) - checkNotAllLowercase(l[names.ColHumanFriendly], "HumanFriendly", l[names.ColHumanFriendly]) + checkAllLowercase(i, l[names.ColHumanFriendly], "AWSCLIV2Command", 
l[names.ColAWSCLIV2Command]) + checkAllLowercase(i, l[names.ColHumanFriendly], "AWSCLIV2CommandNoDashes", l[names.ColAWSCLIV2CommandNoDashes]) + checkAllLowercase(i, l[names.ColHumanFriendly], "GoV1Package", l[names.ColGoV1Package]) + checkAllLowercase(i, l[names.ColHumanFriendly], "GoV2Package", l[names.ColGoV2Package]) + checkAllLowercase(i, l[names.ColHumanFriendly], "ProviderPackageActual", l[names.ColProviderPackageActual]) + checkAllLowercase(i, l[names.ColHumanFriendly], "ProviderPackageCorrect", l[names.ColProviderPackageCorrect]) + checkAllLowercase(i, l[names.ColHumanFriendly], "SplitPackageRealPackage", l[names.ColSplitPackageRealPackage]) + checkAllLowercase(i, l[names.ColHumanFriendly], "Aliases", l[names.ColAliases]) + checkAllLowercase(i, l[names.ColHumanFriendly], "ResourcePrefixActual", l[names.ColResourcePrefixActual]) + checkAllLowercase(i, l[names.ColHumanFriendly], "ResourcePrefixCorrect", l[names.ColResourcePrefixCorrect]) + checkAllLowercase(i, l[names.ColHumanFriendly], "FilePrefix", l[names.ColFilePrefix]) + checkAllLowercase(i, l[names.ColHumanFriendly], "DocPrefix", l[names.ColDocPrefix]) + + checkNotAllLowercase(i, l[names.ColHumanFriendly], "ProviderNameUpper", l[names.ColProviderNameUpper]) + checkNotAllLowercase(i, l[names.ColHumanFriendly], "GoV1ClientTypeName", l[names.ColGoV1ClientTypeName]) + checkNotAllLowercase(i, l[names.ColHumanFriendly], "HumanFriendly", l[names.ColHumanFriendly]) if l[names.ColExclude] == "" && l[names.ColAllowedSubcategory] != "" { - log.Fatalf("in names_data.csv, for service %s, AllowedSubcategory can only be non-blank if Exclude is non-blank", l[names.ColHumanFriendly]) + log.Fatalf("in service data, line %d, for service %s, AllowedSubcategory can only be non-blank if Exclude is non-blank", i, l[names.ColHumanFriendly]) } if l[names.ColExclude] != "" && l[names.ColNote] == "" { - log.Fatalf("in names_data.csv, for service %s, if Exclude is not blank, include a Note why", l[names.ColHumanFriendly]) + 
log.Fatalf("in service data, line %d, for service %s, if Exclude is not blank, include a Note why", i, l[names.ColHumanFriendly]) } if l[names.ColExclude] != "" && l[names.ColAllowedSubcategory] == "" { continue } + deprecatedEnvVar := l[names.ColDeprecatedEnvVar] != "" + tfAwsEnvVar := l[names.ColTfAwsEnvVar] != "" + if deprecatedEnvVar != tfAwsEnvVar { + log.Fatalf("in service data, line %d, for service %s, either both DeprecatedEnvVar and TfAwsEnvVar must be specified or neither can be", i, l[names.ColHumanFriendly]) + } + rre := l[names.ColResourcePrefixActual] if rre == "" { @@ -190,7 +186,7 @@ func main() { allChecks++ } - fmt.Printf(" Performed %d checks on names_data.csv, 0 errors.\n", (allChecks * 36)) + fmt.Printf(" Performed %d checks on names_data.csv, 0 errors.\n", (allChecks * 37)) var fileErrs bool @@ -214,15 +210,15 @@ func main() { fmt.Printf(" Checked %d documentation files to ensure filename prefix, resource name, label regex, and subcategory match, 0 errors.\n", allDocs) } -func checkAllLowercase(service, name, value string) { +func checkAllLowercase(i int, service, name, value string) { if value != "" && strings.ToLower(value) != value { - log.Fatalf("in names_data.csv, for service %s, %s should not include uppercase letters (%s)", service, name, value) + log.Fatalf("in service data, line %d, for service %s, %s should not include uppercase letters (%s)", i, service, name, value) } } -func checkNotAllLowercase(service, name, value string) { +func checkNotAllLowercase(i int, service, name, value string) { if value != "" && strings.ToLower(value) == value { - log.Fatalf("in names_data.csv, for service %s, %s should be properly capitalized; it does not include uppercase letters (%s)", service, name, value) + log.Fatalf("in service data, line %d, for service %s, %s should be properly capitalized; it does not include uppercase letters (%s)", i, service, name, value) } } diff --git a/internal/generate/issuelabels/main.go 
b/internal/generate/issuelabels/main.go index 50b1f7ef258..06178e1135f 100644 --- a/internal/generate/issuelabels/main.go +++ b/internal/generate/issuelabels/main.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/generate/common" "github.com/hashicorp/terraform-provider-aws/names" + "github.com/hashicorp/terraform-provider-aws/names/data" ) type ServiceDatum struct { @@ -26,26 +27,21 @@ type TemplateData struct { func main() { const ( - filename = `../../../.github/labeler-issue-triage.yml` - namesDataFile = "../../../names/names_data.csv" + filename = `../../../.github/labeler-issue-triage.yml` ) g := common.NewGenerator() g.Infof("Generating %s", strings.TrimPrefix(filename, "../../../")) - data, err := common.ReadAllCSVData(namesDataFile) + data, err := data.ReadAllServiceData() if err != nil { - g.Fatalf("error reading %s: %s", namesDataFile, err) + g.Fatalf("error reading service data: %s", err) } td := TemplateData{} - for i, l := range data { - if i < 1 { // no header - continue - } - + for _, l := range data { if l[names.ColExclude] != "" && l[names.ColAllowedSubcategory] == "" { continue } diff --git a/internal/generate/prlabels/main.go b/internal/generate/prlabels/main.go index 6f3fd55f3a7..734d261b279 100644 --- a/internal/generate/prlabels/main.go +++ b/internal/generate/prlabels/main.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/generate/common" "github.com/hashicorp/terraform-provider-aws/names" + "github.com/hashicorp/terraform-provider-aws/names/data" ) type ServiceDatum struct { @@ -28,26 +29,21 @@ type TemplateData struct { func main() { const ( - filename = `../../../.github/labeler-pr-triage.yml` - namesDataFile = "../../../names/names_data.csv" + filename = `../../../.github/labeler-pr-triage.yml` ) g := common.NewGenerator() g.Infof("Generating %s", strings.TrimPrefix(filename, "../../../")) - data, err := common.ReadAllCSVData(namesDataFile) + data, err := 
data.ReadAllServiceData() if err != nil { - g.Fatalf("error reading %s: %s", namesDataFile, err) + g.Fatalf("error reading service data: %s", err) } td := TemplateData{} - for i, l := range data { - if i < 1 { // no header - continue - } - + for _, l := range data { if l[names.ColExclude] != "" && l[names.ColAllowedSubcategory] == "" { continue } diff --git a/internal/generate/servicelabels/main.go b/internal/generate/servicelabels/main.go index 981f75d475f..c09d4fd8cef 100644 --- a/internal/generate/servicelabels/main.go +++ b/internal/generate/servicelabels/main.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/generate/common" "github.com/hashicorp/terraform-provider-aws/names" + "github.com/hashicorp/terraform-provider-aws/names/data" ) type ServiceDatum struct { @@ -25,26 +26,21 @@ type TemplateData struct { func main() { const ( - filename = `../../../infrastructure/repository/labels-service.tf` - namesDataFile = "../../../names/names_data.csv" + filename = `../../../infrastructure/repository/labels-service.tf` ) g := common.NewGenerator() g.Infof("Generating %s", strings.TrimPrefix(filename, "../../../")) - data, err := common.ReadAllCSVData(namesDataFile) + data, err := data.ReadAllServiceData() if err != nil { - g.Fatalf("error reading %s: %s", namesDataFile, err) + g.Fatalf("error reading service data: %s", err) } td := TemplateData{} - for i, l := range data { - if i < 1 { // no header - continue - } - + for _, l := range data { if l[names.ColExclude] != "" && l[names.ColAllowedSubcategory] == "" { continue } diff --git a/internal/generate/servicepackage/main.go b/internal/generate/servicepackage/main.go index df02714630e..7284f8d5b05 100644 --- a/internal/generate/servicepackage/main.go +++ b/internal/generate/servicepackage/main.go @@ -20,31 +20,27 @@ import ( multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-provider-aws/internal/generate/common" 
"github.com/hashicorp/terraform-provider-aws/names" + "github.com/hashicorp/terraform-provider-aws/names/data" "golang.org/x/exp/slices" ) func main() { const ( - filename = `service_package_gen.go` - namesDataFile = `../../../names/names_data.csv` + filename = `service_package_gen.go` ) g := common.NewGenerator() - data, err := common.ReadAllCSVData(namesDataFile) + data, err := data.ReadAllServiceData() if err != nil { - g.Fatalf("error reading %s: %s", namesDataFile, err) + g.Fatalf("error reading service data: %s", err) } servicePackage := os.Getenv("GOPACKAGE") g.Infof("Generating internal/service/%s/%s", servicePackage, filename) - for i, l := range data { - if i < 1 { // no header - continue - } - + for _, l := range data { if l[names.ColProviderPackageActual] == "" && l[names.ColProviderPackageCorrect] == "" { continue } diff --git a/internal/generate/servicepackages/main.go b/internal/generate/servicepackages/main.go index e5a61f98d6c..4b7756e432f 100644 --- a/internal/generate/servicepackages/main.go +++ b/internal/generate/servicepackages/main.go @@ -15,12 +15,10 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/generate/common" "github.com/hashicorp/terraform-provider-aws/names" + "github.com/hashicorp/terraform-provider-aws/names/data" ) func main() { - const ( - namesDataFile = `../../names/names_data.csv` - ) filename := `service_packages_gen.go` flag.Parse() @@ -35,21 +33,17 @@ func main() { g.Infof("Generating %s/%s", packageName, filename) - data, err := common.ReadAllCSVData(namesDataFile) + data, err := data.ReadAllServiceData() if err != nil { - g.Fatalf("error reading %s: %s", namesDataFile, err) + g.Fatalf("error reading service data: %s", err) } td := TemplateData{ PackageName: packageName, } - for i, l := range data { - if i < 1 { // no header - continue - } - + for _, l := range data { if l[names.ColProviderPackageActual] == "" && l[names.ColProviderPackageCorrect] == "" { continue } diff --git 
a/internal/generate/servicesemgrep/main.go b/internal/generate/servicesemgrep/main.go index aee81ffcce6..d21e1d715a5 100644 --- a/internal/generate/servicesemgrep/main.go +++ b/internal/generate/servicesemgrep/main.go @@ -21,6 +21,7 @@ import ( "github.com/YakDriver/regexache" "github.com/hashicorp/terraform-provider-aws/internal/generate/common" "github.com/hashicorp/terraform-provider-aws/names" + "github.com/hashicorp/terraform-provider-aws/names/data" ) //go:embed semgrep_header.tmpl @@ -61,7 +62,6 @@ func main() { filename = `../../../.ci/.semgrep-service-name.yml` filenameCAE = `../../../.ci/.semgrep-caps-aws-ec2.yml` filenameConfigs = `../../../.ci/.semgrep-configs.yml` - namesDataFile = "../../../names/names_data.csv" capsDataFile = "../../../names/caps.csv" ) g := common.NewGenerator() @@ -101,19 +101,15 @@ func main() { g.Infof("Generating %s", strings.TrimPrefix(filename, "../../../")) - data, err := common.ReadAllCSVData(namesDataFile) + data, err := data.ReadAllServiceData() if err != nil { - g.Fatalf("error reading %s: %s", namesDataFile, err) + g.Fatalf("error reading service data: %s", err) } td := TemplateData{} - for i, l := range data { - if i < 1 { // no header - continue - } - + for _, l := range data { if l[names.ColExclude] != "" && l[names.ColAllowedSubcategory] == "" { continue } diff --git a/internal/generate/sweeperregistration/main.go b/internal/generate/sweeperregistration/main.go index 55c4710ca46..2a63228dc4e 100644 --- a/internal/generate/sweeperregistration/main.go +++ b/internal/generate/sweeperregistration/main.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/generate/common" "github.com/hashicorp/terraform-provider-aws/names" + "github.com/hashicorp/terraform-provider-aws/names/data" ) type ServiceDatum struct { @@ -29,8 +30,7 @@ type TemplateData struct { func main() { const ( - filename = `register_gen_test.go` - namesDataFile = "../../names/names_data.csv" + filename = `register_gen_test.go` 
) g := common.NewGenerator() @@ -38,21 +38,17 @@ func main() { g.Infof("Generating %s/%s", packageName, filename) - data, err := common.ReadAllCSVData(namesDataFile) + data, err := data.ReadAllServiceData() if err != nil { - g.Fatalf("error reading %s: %s", namesDataFile, err) + g.Fatalf("error reading service data: %s", err) } td := TemplateData{ PackageName: packageName, } - for i, l := range data { - if i < 1 { // no header - continue - } - + for _, l := range data { if l[names.ColExclude] != "" { continue } diff --git a/internal/generate/teamcity/README.md b/internal/generate/teamcity/README.md index 409baf1e519..c283cf4f44e 100644 --- a/internal/generate/teamcity/README.md +++ b/internal/generate/teamcity/README.md @@ -7,7 +7,7 @@ Can be invoked using either `make gen` along with all other generators or ## Configuration -The generator creates a TeamCity build configuration for each service listed in `names/names_data.csv`. +The generator creates a TeamCity build configuration for each service listed in `names/data/names_data.csv`. By default, the service acceptance tests do not use the VPC Lock and use the default parallelism. These setting can be overridden for each service by adding a `service` entry in the file `acctest_services.hcl`. 
diff --git a/internal/generate/teamcity/services.go b/internal/generate/teamcity/services.go index 01f786900ee..5fc5940b040 100644 --- a/internal/generate/teamcity/services.go +++ b/internal/generate/teamcity/services.go @@ -19,6 +19,7 @@ import ( "github.com/hashicorp/hcl/v2/hclsimple" "github.com/hashicorp/terraform-provider-aws/internal/generate/common" "github.com/hashicorp/terraform-provider-aws/names" + "github.com/hashicorp/terraform-provider-aws/names/data" ) type ServiceDatum struct { @@ -36,7 +37,6 @@ type TemplateData struct { func main() { const ( servicesAllFile = `../../../.teamcity/components/generated/services_all.kt` - namesDataFile = "../../../names/names_data.csv" serviceConfigFile = "./acctest_services.hcl" ) g := common.NewGenerator() @@ -49,19 +49,15 @@ func main() { g.Fatalf("error reading %s: %s", serviceConfigFile, err) } - data, err := common.ReadAllCSVData(namesDataFile) + data, err := data.ReadAllServiceData() if err != nil { - g.Fatalf("error reading %s: %s", namesDataFile, err) + g.Fatalf("error reading service data: %s", err) } td := TemplateData{} - for i, l := range data { - if i < 1 { // no header - continue - } - + for _, l := range data { if l[names.ColExclude] != "" { continue } diff --git a/names/README.md b/names/README.md index 0e7c81a7825..97bf493fd2c 100644 --- a/names/README.md +++ b/names/README.md @@ -2,9 +2,9 @@ Package `names` provides AWS service-name information that is critical to the Terraform AWS Provider working correctly. If you are unsure about a change you are making, please do not hesitate to ask! -**NOTE:** The information in `names_data.csv` affects the provider, generators, documentation, website navigation, etc. working correctly. _Please do not make any changes until you understand the table below._ +**NOTE:** The information in `data/names_data.csv` affects the provider, generators, documentation, website navigation, etc. working correctly. 
_Please do not make any changes until you understand the table below._ -The core of the `names` package is `names_data.csv`, which contains raw, comma-separated data about naming in the AWS Provider, AWS Go SDKs v1 and v2, and AWS CLI. The file is dynamically embedded at build time in the AWS Provider and referenced by generators when generating code. _The information it contains must be correct._ Please double-check any changes. +The core of the `names` package is `data/names_data.csv`, which contains raw, comma-separated data about naming in the AWS Provider, AWS Go SDKs v1 and v2, and AWS CLI. The file is dynamically embedded at build time in the AWS Provider and referenced by generators when generating code. _The information it contains must be correct._ Please double-check any changes. Consumers of `names` include: @@ -13,9 +13,9 @@ Consumers of `names` include: * AWS Provider generators * `skaff` tool -After any edits to `names_data.csv`, run `make gen`. Doing so regenerates code and performs checks on `names_data.csv`. +After any edits to `data/names_data.csv`, run `make gen`. Doing so regenerates code and performs checks on `data/names_data.csv`. -The columns of `names_data.csv` are as follows: +The columns of `data/names_data.csv` are as follows: | Index | Name | Use | Description | | --- | --- | --- | --- | @@ -42,8 +42,8 @@ The columns of `names_data.csv` are as follows: | 20 | **NotImplemented** | Code | Whether the service is implemented by the provider | | 21 | **EndpointOnly** | Code | If **NotImplemented** is non-blank, whether the service endpoint should be included in the provider `endpoints` configuration | | 22 | **AllowedSubcategory** | Code | If **Exclude** is non-blank, whether to include **HumanFriendly** in `website/allowed-subcategories.txt` anyway. In other words, if non-blank, overrides **Exclude** in some situations. Some excluded pseudo-services (_e.g._, VPC is part of EC2) are still subcategories. 
Only applies if **Exclude** is non-blank. | -| 23 | **DeprecatedEnvVar** | Code | Deprecated environment variable name | -| 24 | **EnvVar** | Code | Current environment variable associated with service | +| 23 | **DeprecatedEnvVar** | Code | Deprecated `AWS__ENDPOINT` envvar defined for some services | +| 24 | **TfAwsEnvVar** | Code | `TF_AWS__ENDPOINT` envvar defined for some services | | 25 | **Note** | Reference | Very brief note usually to explain why excluded | For more information about service naming, see [the Naming Guide](https://hashicorp.github.io/terraform-provider-aws/naming/#service-identifier). diff --git a/names/columns.go b/names/columns.go index 38b4760f5d1..96671027845 100644 --- a/names/columns.go +++ b/names/columns.go @@ -27,7 +27,7 @@ const ( ColNotImplemented = 20 // If set, the service will be included in, e.g. labels, but not have a service client ColEndpointOnly = 21 // If set, the service is included in list of endpoints ColAllowedSubcategory = 22 - ColDeprecatedEnvVar = 23 - ColEnvVar = 24 + ColDeprecatedEnvVar = 23 // Deprecated `AWS__ENDPOINT` envvar defined for some services + ColTfAwsEnvVar = 24 // `TF_AWS__ENDPOINT` envvar defined for some services ColNote = 25 ) diff --git a/names/names_data.csv b/names/data/names_data.csv similarity index 99% rename from names/names_data.csv rename to names/data/names_data.csv index 1db2a8f79ec..e4d861e190f 100644 --- a/names/names_data.csv +++ b/names/data/names_data.csv @@ -1,4 +1,4 @@ -AWSCLIV2Command,AWSCLIV2CommandNoDashes,GoV1Package,GoV2Package,ProviderPackageActual,ProviderPackageCorrect,SplitPackageRealPackage,Aliases,ProviderNameUpper,GoV1ClientTypeName,SkipClientGenerate,ClientSDKV1,ClientSDKV2,ResourcePrefixActual,ResourcePrefixCorrect,FilePrefix,DocPrefix,HumanFriendly,Brand,Exclude,NotImplemented,EndpointOnly,AllowedSubcategory,DeprecatedEnvVar,EnvVar,Note 
+AWSCLIV2Command,AWSCLIV2CommandNoDashes,GoV1Package,GoV2Package,ProviderPackageActual,ProviderPackageCorrect,SplitPackageRealPackage,Aliases,ProviderNameUpper,GoV1ClientTypeName,SkipClientGenerate,ClientSDKV1,ClientSDKV2,ResourcePrefixActual,ResourcePrefixCorrect,FilePrefix,DocPrefix,HumanFriendly,Brand,Exclude,NotImplemented,EndpointOnly,AllowedSubcategory,DeprecatedEnvVar,TfAwsEnvVar,Note accessanalyzer,accessanalyzer,accessanalyzer,accessanalyzer,,accessanalyzer,,,AccessAnalyzer,AccessAnalyzer,,,2,,aws_accessanalyzer_,,accessanalyzer_,IAM Access Analyzer,AWS,,,,,,, account,account,account,account,,account,,,Account,Account,,,2,,aws_account_,,account_,Account Management,AWS,,,,,,, acm,acm,acm,acm,,acm,,,ACM,ACM,,,2,,aws_acm_,,acm_,ACM (Certificate Manager),AWS,,,,,,, diff --git a/names/data/read.go b/names/data/read.go new file mode 100644 index 00000000000..953fa9bfd27 --- /dev/null +++ b/names/data/read.go @@ -0,0 +1,37 @@ +// Copyright (c) HashiCorp, Inc. +// SPDX-License-Identifier: MPL-2.0 + +package data + +import ( + "bytes" + _ "embed" + "encoding/csv" + "io" +) + +type ServiceRecord []string + +func ReadAllServiceData() (results []ServiceRecord, err error) { + reader := csv.NewReader(bytes.NewReader(namesData)) + // reader.ReuseRecord = true + + // Skip the header + reader.Read() + + for { + r, err := reader.Read() + if err == io.EOF { + break + } + if err != nil { + return nil, nil + } + results = append(results, ServiceRecord(r)) + } + + return +} + +//go:embed names_data.csv +var namesData []byte diff --git a/names/names.go b/names/names.go index 30dd6320c9d..1993da0cde5 100644 --- a/names/names.go +++ b/names/names.go @@ -3,25 +3,24 @@ // Package names provides constants for AWS service names that are used as keys // for the endpoints slice in internal/conns/conns.go. 
The package also exposes -// access to data found in the names_data.csv file, which provides additional +// access to data found in the data/names_data.csv file, which provides additional // service-related name information. // // Consumers of the names package include the conns package // (internal/conn/conns.go), the provider package // (internal/provider/provider.go), generators, and the skaff tool. // -// It is very important that information in the names_data.csv be exactly +// It is very important that information in the data/names_data.csv be exactly // correct because the Terrform AWS Provider relies on the information to // function correctly. package names import ( - _ "embed" - "encoding/csv" "fmt" "log" "strings" + "github.com/hashicorp/terraform-provider-aws/names/data" "golang.org/x/exp/slices" ) @@ -115,7 +114,7 @@ const ( USGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US-West). ) -// Type ServiceDatum corresponds closely to columns in `names_data.csv` and are +// Type ServiceDatum corresponds closely to columns in `data/names_data.csv` and are // described in detail in README.md. 
type ServiceDatum struct { Aliases []string @@ -142,25 +141,16 @@ func init() { } } -//go:embed names_data.csv -var namesData string - func readCSVIntoServiceData() error { // names_data.csv is dynamically embedded so changes, additions should be made // there also - r := csv.NewReader(strings.NewReader(namesData)) - - d, err := r.ReadAll() + d, err := data.ReadAllServiceData() if err != nil { return fmt.Errorf("reading CSV into service data: %w", err) } - for i, l := range d { - if i < 1 { // omit header line - continue - } - + for _, l := range d { if l[ColExclude] != "" { continue } @@ -183,7 +173,7 @@ func readCSVIntoServiceData() error { Brand: l[ColBrand], DeprecatedEnvVar: l[ColDeprecatedEnvVar], EndpointOnly: l[ColEndpointOnly] != "", - EnvVar: l[ColEnvVar], + EnvVar: l[ColTfAwsEnvVar], GoV1ClientTypeName: l[ColGoV1ClientTypeName], GoV1Package: l[ColGoV1Package], GoV2Package: l[ColGoV2Package], From 7e08fb1d5f32920db1034bf767b5c19eac2ee318 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 19 Dec 2023 17:19:42 -0800 Subject: [PATCH 355/438] Changes service data fields to function calls instead of index accesses --- internal/generate/allowsubcats/main.go | 7 +- internal/generate/awsclient/main.go | 21 ++- internal/generate/checknames/main.go | 147 +++++++++--------- internal/generate/issuelabels/main.go | 19 ++- internal/generate/prlabels/main.go | 19 ++- internal/generate/servicelabels/main.go | 11 +- internal/generate/servicepackage/main.go | 25 ++- internal/generate/servicepackages/main.go | 9 +- internal/generate/servicesemgrep/main.go | 51 +++--- internal/generate/sweeperregistration/main.go | 11 +- internal/generate/teamcity/services.go | 13 +- internal/provider/provider.go | 2 +- names/data/read.go | 137 ++++++++++++++++ names/names.go | 41 +++-- 14 files changed, 318 insertions(+), 195 deletions(-) diff --git a/internal/generate/allowsubcats/main.go b/internal/generate/allowsubcats/main.go index 1abedd0d3da..bcf0a3aca00 100644 --- 
a/internal/generate/allowsubcats/main.go +++ b/internal/generate/allowsubcats/main.go @@ -12,7 +12,6 @@ import ( "strings" "github.com/hashicorp/terraform-provider-aws/internal/generate/common" - "github.com/hashicorp/terraform-provider-aws/names" "github.com/hashicorp/terraform-provider-aws/names/data" ) @@ -41,16 +40,16 @@ func main() { td := TemplateData{} for _, l := range data { - if l[names.ColExclude] != "" && l[names.ColAllowedSubcategory] == "" { + if l.Exclude() && l.AllowedSubcategory() == "" { continue } - if l[names.ColNotImplemented] != "" { + if l.NotImplemented() { continue } sd := ServiceDatum{ - HumanFriendly: l[names.ColHumanFriendly], + HumanFriendly: l.HumanFriendly(), } td.Services = append(td.Services, sd) diff --git a/internal/generate/awsclient/main.go b/internal/generate/awsclient/main.go index fdd21a9d1fd..b5e7a70faa5 100644 --- a/internal/generate/awsclient/main.go +++ b/internal/generate/awsclient/main.go @@ -11,7 +11,6 @@ import ( "sort" "github.com/hashicorp/terraform-provider-aws/internal/generate/common" - "github.com/hashicorp/terraform-provider-aws/names" "github.com/hashicorp/terraform-provider-aws/names/data" ) @@ -44,30 +43,30 @@ func main() { td := TemplateData{} for _, l := range data { - if l[names.ColExclude] != "" { + if l.Exclude() { continue } - if l[names.ColNotImplemented] != "" { + if l.NotImplemented() { continue } - if l[names.ColProviderPackageActual] == "" && l[names.ColProviderPackageCorrect] == "" { + if l.ProviderPackageActual() == "" && l.ProviderPackageCorrect() == "" { continue } s := ServiceDatum{ - ProviderNameUpper: l[names.ColProviderNameUpper], - GoV1Package: l[names.ColGoV1Package], - GoV2Package: l[names.ColGoV2Package], + ProviderNameUpper: l.ProviderNameUpper(), + GoV1Package: l.GoV1Package(), + GoV2Package: l.GoV2Package(), } - if l[names.ColClientSDKV1] != "" { + if l.ClientSDKV1() != "" { s.SDKVersion = "1" - s.GoV1ClientTypeName = l[names.ColGoV1ClientTypeName] + s.GoV1ClientTypeName = 
l.GoV1ClientTypeName() } - if l[names.ColClientSDKV2] != "" { - if l[names.ColClientSDKV1] != "" { + if l.ClientSDKV2() != "" { + if l.ClientSDKV1() != "" { s.SDKVersion = "1,2" } else { s.SDKVersion = "2" diff --git a/internal/generate/checknames/main.go b/internal/generate/checknames/main.go index 886654c9a26..897e8994d69 100644 --- a/internal/generate/checknames/main.go +++ b/internal/generate/checknames/main.go @@ -17,7 +17,6 @@ import ( "strings" multierror "github.com/hashicorp/go-multierror" - "github.com/hashicorp/terraform-provider-aws/names" "github.com/hashicorp/terraform-provider-aws/names/data" ) @@ -47,7 +46,7 @@ func main() { docPrefixes := []DocPrefix{} // test to be reworked for i, l := range data { - if l[names.ColHumanFriendly] == "" { + if l.HumanFriendly() == "" { log.Fatalf("in service data, line %d, HumanFriendly cannot be blank", i+lineOffset) } @@ -55,132 +54,130 @@ func main() { // ProviderPackageCorrect, ProviderNameUpper, GoV1ClientTypeName, // ResourcePrefixActual, ResourcePrefixCorrect, FilePrefix, DocPrefix - if l[names.ColAWSCLIV2Command] != "" && strings.Replace(l[names.ColAWSCLIV2Command], "-", "", -1) != l[names.ColAWSCLIV2CommandNoDashes] { - log.Fatalf("in service data, line %d, for service %s, AWSCLIV2CommandNoDashes must be the same as AWSCLIV2Command without dashes (%s)", i, l[names.ColHumanFriendly], strings.Replace(l[names.ColAWSCLIV2Command], "-", "", -1)) + if l.AWSCLIV2Command() != "" && strings.Replace(l.AWSCLIV2Command(), "-", "", -1) != l.AWSCLIV2CommandNoDashes() { + log.Fatalf("in service data, line %d, for service %s, AWSCLIV2CommandNoDashes must be the same as AWSCLIV2Command without dashes (%s)", i, l.HumanFriendly(), strings.Replace(l.AWSCLIV2Command(), "-", "", -1)) } - if l[names.ColProviderPackageCorrect] != "" && l[names.ColAWSCLIV2CommandNoDashes] != "" && l[names.ColGoV2Package] != "" { - if len(l[names.ColAWSCLIV2CommandNoDashes]) < len(l[names.ColGoV2Package]) && l[names.ColProviderPackageCorrect] != 
l[names.ColAWSCLIV2CommandNoDashes] { - log.Fatalf("in service data, line %d, for service %s, ProviderPackageCorrect must be shorter of AWSCLIV2CommandNoDashes (%s) and GoV2Package (%s)", i, l[names.ColHumanFriendly], l[names.ColAWSCLIV2CommandNoDashes], l[names.ColGoV2Package]) + if l.ProviderPackageCorrect() != "" && l.AWSCLIV2CommandNoDashes() != "" && l.GoV2Package() != "" { + if len(l.AWSCLIV2CommandNoDashes()) < len(l.GoV2Package()) && l.ProviderPackageCorrect() != l.AWSCLIV2CommandNoDashes() { + log.Fatalf("in service data, line %d, for service %s, ProviderPackageCorrect must be shorter of AWSCLIV2CommandNoDashes (%s) and GoV2Package (%s)", i, l.HumanFriendly(), l.AWSCLIV2CommandNoDashes(), l.GoV2Package()) } - if len(l[names.ColAWSCLIV2CommandNoDashes]) > len(l[names.ColGoV2Package]) && l[names.ColProviderPackageCorrect] != l[names.ColGoV2Package] { - log.Fatalf("in service data, line %d, for service %s, ProviderPackageCorrect must be shorter of AWSCLIV2CommandNoDashes (%s) and GoV2Package (%s)", i, l[names.ColHumanFriendly], l[names.ColAWSCLIV2CommandNoDashes], l[names.ColGoV2Package]) + if len(l.AWSCLIV2CommandNoDashes()) > len(l.GoV2Package()) && l.ProviderPackageCorrect() != l.GoV2Package() { + log.Fatalf("in service data, line %d, for service %s, ProviderPackageCorrect must be shorter of AWSCLIV2CommandNoDashes (%s) and GoV2Package (%s)", i, l.HumanFriendly(), l.AWSCLIV2CommandNoDashes(), l.GoV2Package()) } } - if l[names.ColAWSCLIV2CommandNoDashes] == "" && l[names.ColGoV2Package] == "" && l[names.ColExclude] == "" { - log.Fatalf("in service data, line %d, for service %s, if Exclude is blank, either AWSCLIV2CommandNoDashes or GoV2Package must have values", i, l[names.ColHumanFriendly]) + if l.AWSCLIV2CommandNoDashes() == "" && l.GoV2Package() == "" && !l.Exclude() { + log.Fatalf("in service data, line %d, for service %s, if Exclude is blank, either AWSCLIV2CommandNoDashes or GoV2Package must have values", i, l.HumanFriendly()) } - if 
l[names.ColProviderPackageActual] != "" && l[names.ColProviderPackageCorrect] == "" { - log.Fatalf("in service data, line %d, for service %s, ProviderPackageActual can't be non-blank if ProviderPackageCorrect is blank", i, l[names.ColHumanFriendly]) + if l.ProviderPackageActual() != "" && l.ProviderPackageCorrect() == "" { + log.Fatalf("in service data, line %d, for service %s, ProviderPackageActual can't be non-blank if ProviderPackageCorrect is blank", i, l.HumanFriendly()) } - if l[names.ColProviderPackageActual] == "" && l[names.ColProviderPackageCorrect] == "" && l[names.ColExclude] == "" { - log.Fatalf("in service data, line %d, for service %s, ProviderPackageActual and ProviderPackageCorrect cannot both be blank unless Exclude is non-blank", i, l[names.ColHumanFriendly]) + if l.ProviderPackageActual() == "" && l.ProviderPackageCorrect() == "" && !l.Exclude() { + log.Fatalf("in service data, line %d, for service %s, ProviderPackageActual and ProviderPackageCorrect cannot both be blank unless Exclude is non-blank", i, l.HumanFriendly()) } - if l[names.ColProviderPackageCorrect] != "" && l[names.ColProviderPackageActual] == l[names.ColProviderPackageCorrect] { - log.Fatalf("in service data, line %d, for service %s, ProviderPackageActual should only be used if different from ProviderPackageCorrect", i, l[names.ColHumanFriendly]) + if l.ProviderPackageCorrect() != "" && l.ProviderPackageActual() == l.ProviderPackageCorrect() { + log.Fatalf("in service data, line %d, for service %s, ProviderPackageActual should only be used if different from ProviderPackageCorrect", i, l.HumanFriendly()) } - packageToUse := l[names.ColProviderPackageCorrect] + packageToUse := l.ProviderPackageCorrect() - if l[names.ColProviderPackageActual] != "" { - packageToUse = l[names.ColProviderPackageActual] + if l.ProviderPackageActual() != "" { + packageToUse = l.ProviderPackageActual() } - if l[names.ColAliases] != "" && packageToUse != "" { - p := strings.Split(l[names.ColAliases], ";") 
- + if p := l.Aliases(); len(p) > 0 && packageToUse != "" { for _, v := range p { if v == packageToUse { - log.Fatalf("in service data, line %d, for service %s, Aliases should not include ProviderPackageActual, if not blank, or ProviderPackageCorrect, if not blank and ProviderPackageActual is blank", i, l[names.ColHumanFriendly]) + log.Fatalf("in service data, line %d, for service %s, Aliases should not include ProviderPackageActual, if not blank, or ProviderPackageCorrect, if not blank and ProviderPackageActual is blank", i, l.HumanFriendly()) } } } - if l[names.ColClientSDKV1] == "" && l[names.ColClientSDKV2] == "" && l[names.ColExclude] == "" { - log.Fatalf("in service data, line %d, for service %s, at least one of ClientSDKV1 or ClientSDKV2 must have a value if Exclude is blank", i, l[names.ColHumanFriendly]) + if l.ClientSDKV1() == "" && l.ClientSDKV2() == "" && !l.Exclude() { + log.Fatalf("in service data, line %d, for service %s, at least one of ClientSDKV1 or ClientSDKV2 must have a value if Exclude is blank", i, l.HumanFriendly()) } - if l[names.ColClientSDKV1] != "" && (l[names.ColGoV1Package] == "" || l[names.ColGoV1ClientTypeName] == "") { - log.Fatalf("in service data, line %d, for service %s, SDKVersion is set to 1 so neither GoV1Package nor GoV1ClientTypeName can be blank", i, l[names.ColHumanFriendly]) + if l.ClientSDKV1() != "" && (l.GoV1Package() == "" || l.GoV1ClientTypeName() == "") { + log.Fatalf("in service data, line %d, for service %s, SDKVersion is set to 1 so neither GoV1Package nor GoV1ClientTypeName can be blank", i, l.HumanFriendly()) } - if l[names.ColClientSDKV2] != "" && l[names.ColGoV2Package] == "" { - log.Fatalf("in service data, line %d, for service %s, SDKVersion is set to 2 so GoV2Package cannot be blank", i, l[names.ColHumanFriendly]) + if l.ClientSDKV2() != "" && l.GoV2Package() == "" { + log.Fatalf("in service data, line %d, for service %s, SDKVersion is set to 2 so GoV2Package cannot be blank", i, l.HumanFriendly()) } - if 
l[names.ColResourcePrefixCorrect] != "" && l[names.ColResourcePrefixCorrect] != fmt.Sprintf("aws_%s_", l[names.ColProviderPackageCorrect]) { - log.Fatalf("in service data, line %d, for service %s, ResourcePrefixCorrect should be aws__, where is ProviderPackageCorrect", i, l[names.ColHumanFriendly]) + if l.ResourcePrefixCorrect() != "" && l.ResourcePrefixCorrect() != fmt.Sprintf("aws_%s_", l.ProviderPackageCorrect()) { + log.Fatalf("in service data, line %d, for service %s, ResourcePrefixCorrect should be aws__, where is ProviderPackageCorrect", i, l.HumanFriendly()) } - if l[names.ColResourcePrefixCorrect] != "" && l[names.ColResourcePrefixActual] == l[names.ColResourcePrefixCorrect] { - log.Fatalf("in service data, line %d, for service %s, ResourcePrefixActual should not be the same as ResourcePrefixCorrect, set ResourcePrefixActual to blank", i, l[names.ColHumanFriendly]) + if l.ResourcePrefixCorrect() != "" && l.ResourcePrefixActual() == l.ResourcePrefixCorrect() { + log.Fatalf("in service data, line %d, for service %s, ResourcePrefixActual should not be the same as ResourcePrefixCorrect, set ResourcePrefixActual to blank", i, l.HumanFriendly()) } - if l[names.ColSplitPackageRealPackage] != "" && (l[names.ColProviderPackageCorrect] == "" || l[names.ColFilePrefix] == "" || l[names.ColResourcePrefixActual] == "") { - log.Fatalf("in service data, line %d, for service %s, if SplitPackageRealPackage has a value, ProviderPackageCorrect, ResourcePrefixActual and FilePrefix must have values", i, l[names.ColHumanFriendly]) + if l.SplitPackageRealPackage() != "" && (l.ProviderPackageCorrect() == "" || l.FilePrefix() == "" || l.ResourcePrefixActual() == "") { + log.Fatalf("in service data, line %d, for service %s, if SplitPackageRealPackage has a value, ProviderPackageCorrect, ResourcePrefixActual and FilePrefix must have values", i, l.HumanFriendly()) } - if l[names.ColSplitPackageRealPackage] == "" && l[names.ColFilePrefix] != "" { - log.Fatalf("in service data, line %d, 
for service %s, if SplitPackageRealPackge is blank, FilePrefix must also be blank", i, l[names.ColHumanFriendly]) + if l.SplitPackageRealPackage() == "" && l.FilePrefix() != "" { + log.Fatalf("in service data, line %d, for service %s, if SplitPackageRealPackge is blank, FilePrefix must also be blank", i, l.HumanFriendly()) } - if l[names.ColBrand] != "AWS" && l[names.ColBrand] != "Amazon" && l[names.ColBrand] != "" { - log.Fatalf("in service data, line %d, for service %s, Brand must be AWS, Amazon, or blank; found %s", l[names.ColHumanFriendly], i, l[names.ColBrand]) + if l.Brand() != "AWS" && l.Brand() != "Amazon" && l.Brand() != "" { + log.Fatalf("in service data, line %d, for service %s, Brand must be AWS, Amazon, or blank; found %s", l.HumanFriendly(), i, l.Brand()) } - if (l[names.ColExclude] == "" || (l[names.ColExclude] != "" && l[names.ColAllowedSubcategory] != "")) && l[names.ColDocPrefix] == "" { - log.Fatalf("in service data, line %d, for service %s, DocPrefix cannot be blank unless Exclude is non-blank and AllowedSubcategory is blank", i, l[names.ColHumanFriendly]) + if (!l.Exclude() || (l.Exclude() && l.AllowedSubcategory() != "")) && l.DocPrefix() == "" { + log.Fatalf("in service data, line %d, for service %s, DocPrefix cannot be blank unless Exclude is non-blank and AllowedSubcategory is blank", i, l.HumanFriendly()) } - checkAllLowercase(i, l[names.ColHumanFriendly], "AWSCLIV2Command", l[names.ColAWSCLIV2Command]) - checkAllLowercase(i, l[names.ColHumanFriendly], "AWSCLIV2CommandNoDashes", l[names.ColAWSCLIV2CommandNoDashes]) - checkAllLowercase(i, l[names.ColHumanFriendly], "GoV1Package", l[names.ColGoV1Package]) - checkAllLowercase(i, l[names.ColHumanFriendly], "GoV2Package", l[names.ColGoV2Package]) - checkAllLowercase(i, l[names.ColHumanFriendly], "ProviderPackageActual", l[names.ColProviderPackageActual]) - checkAllLowercase(i, l[names.ColHumanFriendly], "ProviderPackageCorrect", l[names.ColProviderPackageCorrect]) - checkAllLowercase(i, 
l[names.ColHumanFriendly], "SplitPackageRealPackage", l[names.ColSplitPackageRealPackage]) - checkAllLowercase(i, l[names.ColHumanFriendly], "Aliases", l[names.ColAliases]) - checkAllLowercase(i, l[names.ColHumanFriendly], "ResourcePrefixActual", l[names.ColResourcePrefixActual]) - checkAllLowercase(i, l[names.ColHumanFriendly], "ResourcePrefixCorrect", l[names.ColResourcePrefixCorrect]) - checkAllLowercase(i, l[names.ColHumanFriendly], "FilePrefix", l[names.ColFilePrefix]) - checkAllLowercase(i, l[names.ColHumanFriendly], "DocPrefix", l[names.ColDocPrefix]) - - checkNotAllLowercase(i, l[names.ColHumanFriendly], "ProviderNameUpper", l[names.ColProviderNameUpper]) - checkNotAllLowercase(i, l[names.ColHumanFriendly], "GoV1ClientTypeName", l[names.ColGoV1ClientTypeName]) - checkNotAllLowercase(i, l[names.ColHumanFriendly], "HumanFriendly", l[names.ColHumanFriendly]) - - if l[names.ColExclude] == "" && l[names.ColAllowedSubcategory] != "" { - log.Fatalf("in service data, line %d, for service %s, AllowedSubcategory can only be non-blank if Exclude is non-blank", i, l[names.ColHumanFriendly]) + checkAllLowercase(i, l.HumanFriendly(), "AWSCLIV2Command", l.AWSCLIV2Command()) + checkAllLowercase(i, l.HumanFriendly(), "AWSCLIV2CommandNoDashes", l.AWSCLIV2CommandNoDashes()) + checkAllLowercase(i, l.HumanFriendly(), "GoV1Package", l.GoV1Package()) + checkAllLowercase(i, l.HumanFriendly(), "GoV2Package", l.GoV2Package()) + checkAllLowercase(i, l.HumanFriendly(), "ProviderPackageActual", l.ProviderPackageActual()) + checkAllLowercase(i, l.HumanFriendly(), "ProviderPackageCorrect", l.ProviderPackageCorrect()) + checkAllLowercase(i, l.HumanFriendly(), "SplitPackageRealPackage", l.SplitPackageRealPackage()) + checkAllLowercase(i, l.HumanFriendly(), "Aliases", l.Aliases()...) 
+ checkAllLowercase(i, l.HumanFriendly(), "ResourcePrefixActual", l.ResourcePrefixActual()) + checkAllLowercase(i, l.HumanFriendly(), "ResourcePrefixCorrect", l.ResourcePrefixCorrect()) + checkAllLowercase(i, l.HumanFriendly(), "FilePrefix", l.FilePrefix()) + checkAllLowercase(i, l.HumanFriendly(), "DocPrefix", l.DocPrefix()) + + checkNotAllLowercase(i, l.HumanFriendly(), "ProviderNameUpper", l.ProviderNameUpper()) + checkNotAllLowercase(i, l.HumanFriendly(), "GoV1ClientTypeName", l.GoV1ClientTypeName()) + checkNotAllLowercase(i, l.HumanFriendly(), "HumanFriendly", l.HumanFriendly()) + + if !l.Exclude() && l.AllowedSubcategory() != "" { + log.Fatalf("in service data, line %d, for service %s, AllowedSubcategory can only be non-blank if Exclude is non-blank", i, l.HumanFriendly()) } - if l[names.ColExclude] != "" && l[names.ColNote] == "" { - log.Fatalf("in service data, line %d, for service %s, if Exclude is not blank, include a Note why", i, l[names.ColHumanFriendly]) + if l.Exclude() && l.Note() == "" { + log.Fatalf("in service data, line %d, for service %s, if Exclude is not blank, include a Note why", i, l.HumanFriendly()) } - if l[names.ColExclude] != "" && l[names.ColAllowedSubcategory] == "" { + if l.Exclude() && l.AllowedSubcategory() == "" { continue } - deprecatedEnvVar := l[names.ColDeprecatedEnvVar] != "" - tfAwsEnvVar := l[names.ColTfAwsEnvVar] != "" + deprecatedEnvVar := l.DeprecatedEnvVar() != "" + tfAwsEnvVar := l.TfAwsEnvVar() != "" if deprecatedEnvVar != tfAwsEnvVar { - log.Fatalf("in service data, line %d, for service %s, either both DeprecatedEnvVar and TfAwsEnvVar must be specified or neither can be", i, l[names.ColHumanFriendly]) + log.Fatalf("in service data, line %d, for service %s, either both DeprecatedEnvVar and TfAwsEnvVar must be specified or neither can be", i, l.HumanFriendly()) } - rre := l[names.ColResourcePrefixActual] + rre := l.ResourcePrefixActual() if rre == "" { - rre = l[names.ColResourcePrefixCorrect] + rre = 
l.ResourcePrefixCorrect() } docPrefixes = append(docPrefixes, DocPrefix{ - HumanFriendly: l[names.ColHumanFriendly], - DocPrefixRegex: strings.Split(l[names.ColDocPrefix], ";"), + HumanFriendly: l.HumanFriendly(), + DocPrefixRegex: strings.Split(l.DocPrefix(), ";"), ResourceRegex: rre, }) @@ -210,9 +207,11 @@ func main() { fmt.Printf(" Checked %d documentation files to ensure filename prefix, resource name, label regex, and subcategory match, 0 errors.\n", allDocs) } -func checkAllLowercase(i int, service, name, value string) { - if value != "" && strings.ToLower(value) != value { - log.Fatalf("in service data, line %d, for service %s, %s should not include uppercase letters (%s)", i, service, name, value) +func checkAllLowercase(i int, service, name string, values ...string) { + for _, value := range values { + if value != "" && strings.ToLower(value) != value { + log.Fatalf("in service data, line %d, for service %s, %s should not include uppercase letters (%s)", i, service, name, value) + } } } diff --git a/internal/generate/issuelabels/main.go b/internal/generate/issuelabels/main.go index 06178e1135f..4ac4e7b4d76 100644 --- a/internal/generate/issuelabels/main.go +++ b/internal/generate/issuelabels/main.go @@ -12,7 +12,6 @@ import ( "strings" "github.com/hashicorp/terraform-provider-aws/internal/generate/common" - "github.com/hashicorp/terraform-provider-aws/names" "github.com/hashicorp/terraform-provider-aws/names/data" ) @@ -42,28 +41,28 @@ func main() { td := TemplateData{} for _, l := range data { - if l[names.ColExclude] != "" && l[names.ColAllowedSubcategory] == "" { + if l.Exclude() && l.AllowedSubcategory() == "" { continue } - if l[names.ColProviderPackageActual] == "" && l[names.ColProviderPackageCorrect] == "" { + if l.ProviderPackageActual() == "" && l.ProviderPackageCorrect() == "" { continue } - if l[names.ColResourcePrefixActual] == "" && l[names.ColResourcePrefixCorrect] == "" { + if l.ResourcePrefixActual() == "" && l.ResourcePrefixCorrect() == 
"" { continue } - p := l[names.ColProviderPackageCorrect] + p := l.ProviderPackageCorrect() - if l[names.ColProviderPackageActual] != "" { - p = l[names.ColProviderPackageActual] + if l.ProviderPackageActual() != "" { + p = l.ProviderPackageActual() } - rp := l[names.ColResourcePrefixCorrect] + rp := l.ResourcePrefixCorrect() - if l[names.ColResourcePrefixActual] != "" { - rp = l[names.ColResourcePrefixActual] + if l.ResourcePrefixActual() != "" { + rp = l.ResourcePrefixActual() } s := ServiceDatum{ diff --git a/internal/generate/prlabels/main.go b/internal/generate/prlabels/main.go index 734d261b279..d506ccb2148 100644 --- a/internal/generate/prlabels/main.go +++ b/internal/generate/prlabels/main.go @@ -12,7 +12,6 @@ import ( "strings" "github.com/hashicorp/terraform-provider-aws/internal/generate/common" - "github.com/hashicorp/terraform-provider-aws/names" "github.com/hashicorp/terraform-provider-aws/names/data" ) @@ -44,31 +43,31 @@ func main() { td := TemplateData{} for _, l := range data { - if l[names.ColExclude] != "" && l[names.ColAllowedSubcategory] == "" { + if l.Exclude() && l.AllowedSubcategory() == "" { continue } - if l[names.ColProviderPackageActual] == "" && l[names.ColProviderPackageCorrect] == "" { + if l.ProviderPackageActual() == "" && l.ProviderPackageCorrect() == "" { continue } - p := l[names.ColProviderPackageCorrect] + p := l.ProviderPackageCorrect() - if l[names.ColProviderPackageActual] != "" { - p = l[names.ColProviderPackageActual] + if l.ProviderPackageActual() != "" { + p = l.ProviderPackageActual() } ap := p - if l[names.ColSplitPackageRealPackage] != "" { - ap = l[names.ColSplitPackageRealPackage] + if l.SplitPackageRealPackage() != "" { + ap = l.SplitPackageRealPackage() } s := ServiceDatum{ ProviderPackage: p, ActualPackage: ap, - FilePrefix: l[names.ColFilePrefix], - DocPrefixes: strings.Split(l[names.ColDocPrefix], ";"), + FilePrefix: l.FilePrefix(), + DocPrefixes: strings.Split(l.DocPrefix(), ";"), } td.Services = 
append(td.Services, s) diff --git a/internal/generate/servicelabels/main.go b/internal/generate/servicelabels/main.go index c09d4fd8cef..5a77506efc6 100644 --- a/internal/generate/servicelabels/main.go +++ b/internal/generate/servicelabels/main.go @@ -12,7 +12,6 @@ import ( "strings" "github.com/hashicorp/terraform-provider-aws/internal/generate/common" - "github.com/hashicorp/terraform-provider-aws/names" "github.com/hashicorp/terraform-provider-aws/names/data" ) @@ -41,18 +40,18 @@ func main() { td := TemplateData{} for _, l := range data { - if l[names.ColExclude] != "" && l[names.ColAllowedSubcategory] == "" { + if l.Exclude() && l.AllowedSubcategory() == "" { continue } - if l[names.ColProviderPackageActual] == "" && l[names.ColProviderPackageCorrect] == "" { + if l.ProviderPackageActual() == "" && l.ProviderPackageCorrect() == "" { continue } - p := l[names.ColProviderPackageCorrect] + p := l.ProviderPackageCorrect() - if l[names.ColProviderPackageActual] != "" { - p = l[names.ColProviderPackageActual] + if l.ProviderPackageActual() != "" { + p = l.ProviderPackageActual() } s := ServiceDatum{ diff --git a/internal/generate/servicepackage/main.go b/internal/generate/servicepackage/main.go index 7284f8d5b05..1e36076a750 100644 --- a/internal/generate/servicepackage/main.go +++ b/internal/generate/servicepackage/main.go @@ -19,7 +19,6 @@ import ( "github.com/YakDriver/regexache" multierror "github.com/hashicorp/go-multierror" "github.com/hashicorp/terraform-provider-aws/internal/generate/common" - "github.com/hashicorp/terraform-provider-aws/names" "github.com/hashicorp/terraform-provider-aws/names/data" "golang.org/x/exp/slices" ) @@ -41,15 +40,15 @@ func main() { g.Infof("Generating internal/service/%s/%s", servicePackage, filename) for _, l := range data { - if l[names.ColProviderPackageActual] == "" && l[names.ColProviderPackageCorrect] == "" { + if l.ProviderPackageActual() == "" && l.ProviderPackageCorrect() == "" { continue } // See 
internal/generate/namesconsts/main.go. - p := l[names.ColProviderPackageCorrect] + p := l.ProviderPackageCorrect() - if l[names.ColProviderPackageActual] != "" { - p = l[names.ColProviderPackageActual] + if l.ProviderPackageActual() != "" { + p = l.ProviderPackageActual() } if p != servicePackage { @@ -74,23 +73,23 @@ func main() { } s := ServiceDatum{ - SkipClientGenerate: l[names.ColSkipClientGenerate] != "", - GoV1Package: l[names.ColGoV1Package], - GoV2Package: l[names.ColGoV2Package], + SkipClientGenerate: l.SkipClientGenerate(), + GoV1Package: l.GoV1Package(), + GoV2Package: l.GoV2Package(), ProviderPackage: p, - ProviderNameUpper: l[names.ColProviderNameUpper], + ProviderNameUpper: l.ProviderNameUpper(), FrameworkDataSources: v.frameworkDataSources, FrameworkResources: v.frameworkResources, SDKDataSources: v.sdkDataSources, SDKResources: v.sdkResources, } - if l[names.ColClientSDKV1] != "" { + if l.ClientSDKV1() != "" { s.SDKVersion = "1" - s.GoV1ClientTypeName = l[names.ColGoV1ClientTypeName] + s.GoV1ClientTypeName = l.GoV1ClientTypeName() } - if l[names.ColClientSDKV2] != "" { - if l[names.ColClientSDKV1] != "" { + if l.ClientSDKV2() != "" { + if l.ClientSDKV1() != "" { s.SDKVersion = "1,2" } else { s.SDKVersion = "2" diff --git a/internal/generate/servicepackages/main.go b/internal/generate/servicepackages/main.go index 4b7756e432f..dfe56c5135c 100644 --- a/internal/generate/servicepackages/main.go +++ b/internal/generate/servicepackages/main.go @@ -14,7 +14,6 @@ import ( "sort" "github.com/hashicorp/terraform-provider-aws/internal/generate/common" - "github.com/hashicorp/terraform-provider-aws/names" "github.com/hashicorp/terraform-provider-aws/names/data" ) @@ -44,15 +43,15 @@ func main() { } for _, l := range data { - if l[names.ColProviderPackageActual] == "" && l[names.ColProviderPackageCorrect] == "" { + if l.ProviderPackageActual() == "" && l.ProviderPackageCorrect() == "" { continue } // See internal/generate/namesconsts/main.go. 
- p := l[names.ColProviderPackageCorrect] + p := l.ProviderPackageCorrect() - if l[names.ColProviderPackageActual] != "" { - p = l[names.ColProviderPackageActual] + if l.ProviderPackageActual() != "" { + p = l.ProviderPackageActual() } spdFile := fmt.Sprintf("../service/%s/service_package_gen.go", p) diff --git a/internal/generate/servicesemgrep/main.go b/internal/generate/servicesemgrep/main.go index d21e1d715a5..5f15f388c90 100644 --- a/internal/generate/servicesemgrep/main.go +++ b/internal/generate/servicesemgrep/main.go @@ -20,7 +20,6 @@ import ( "github.com/YakDriver/regexache" "github.com/hashicorp/terraform-provider-aws/internal/generate/common" - "github.com/hashicorp/terraform-provider-aws/names" "github.com/hashicorp/terraform-provider-aws/names/data" ) @@ -110,60 +109,58 @@ func main() { td := TemplateData{} for _, l := range data { - if l[names.ColExclude] != "" && l[names.ColAllowedSubcategory] == "" { + if l.Exclude() && l.AllowedSubcategory() == "" { continue } - if l[names.ColProviderPackageActual] == "" && l[names.ColProviderPackageCorrect] == "" { + if l.ProviderPackageActual() == "" && l.ProviderPackageCorrect() == "" { continue } - p := l[names.ColProviderPackageCorrect] + p := l.ProviderPackageCorrect() - if l[names.ColProviderPackageActual] != "" { - p = l[names.ColProviderPackageActual] + if l.ProviderPackageActual() != "" { + p = l.ProviderPackageActual() } rp := p - if l[names.ColSplitPackageRealPackage] != "" { - rp = l[names.ColSplitPackageRealPackage] + if l.SplitPackageRealPackage() != "" { + rp = l.SplitPackageRealPackage() } if _, err := os.Stat(fmt.Sprintf("../../service/%s", rp)); err != nil || errors.Is(err, fs.ErrNotExist) { continue } - if l[names.ColAliases] != "" { - for _, v := range strings.Split(l[names.ColAliases], ";") { - if strings.ToLower(v) == "es" { - continue // "es" is too short to usefully grep - } - - if strings.ToLower(v) == "config" { - continue // "config" is too ubiquitous - } + for _, v := range l.Aliases() 
{ + if strings.ToLower(v) == "es" { + continue // "es" is too short to usefully grep + } - sd := ServiceDatum{ - ProviderPackage: rp, - ServiceAlias: v, - LowerAlias: strings.ToLower(v), - MainAlias: false, - } + if strings.ToLower(v) == "config" { + continue // "config" is too ubiquitous + } - td.Services = append(td.Services, sd) + sd := ServiceDatum{ + ProviderPackage: rp, + ServiceAlias: v, + LowerAlias: strings.ToLower(v), + MainAlias: false, } + + td.Services = append(td.Services, sd) } sd := ServiceDatum{ ProviderPackage: rp, - ServiceAlias: l[names.ColProviderNameUpper], + ServiceAlias: l.ProviderNameUpper(), LowerAlias: strings.ToLower(p), MainAlias: true, } - if l[names.ColFilePrefix] != "" { - sd.FilePrefix = l[names.ColFilePrefix] + if l.FilePrefix() != "" { + sd.FilePrefix = l.FilePrefix() } td.Services = append(td.Services, sd) diff --git a/internal/generate/sweeperregistration/main.go b/internal/generate/sweeperregistration/main.go index 2a63228dc4e..c2e2cb3e89e 100644 --- a/internal/generate/sweeperregistration/main.go +++ b/internal/generate/sweeperregistration/main.go @@ -15,7 +15,6 @@ import ( "sort" "github.com/hashicorp/terraform-provider-aws/internal/generate/common" - "github.com/hashicorp/terraform-provider-aws/names" "github.com/hashicorp/terraform-provider-aws/names/data" ) @@ -49,18 +48,18 @@ func main() { } for _, l := range data { - if l[names.ColExclude] != "" { + if l.Exclude() { continue } - if l[names.ColProviderPackageActual] == "" && l[names.ColProviderPackageCorrect] == "" { + if l.ProviderPackageActual() == "" && l.ProviderPackageCorrect() == "" { continue } - p := l[names.ColProviderPackageCorrect] + p := l.ProviderPackageCorrect() - if l[names.ColProviderPackageActual] != "" { - p = l[names.ColProviderPackageActual] + if l.ProviderPackageActual() != "" { + p = l.ProviderPackageActual() } if _, err := os.Stat(fmt.Sprintf("../service/%s", p)); err != nil || errors.Is(err, fs.ErrNotExist) { diff --git 
a/internal/generate/teamcity/services.go b/internal/generate/teamcity/services.go index 5fc5940b040..672328d87d4 100644 --- a/internal/generate/teamcity/services.go +++ b/internal/generate/teamcity/services.go @@ -18,7 +18,6 @@ import ( "github.com/hashicorp/hcl/v2/hclsimple" "github.com/hashicorp/terraform-provider-aws/internal/generate/common" - "github.com/hashicorp/terraform-provider-aws/names" "github.com/hashicorp/terraform-provider-aws/names/data" ) @@ -58,18 +57,18 @@ func main() { td := TemplateData{} for _, l := range data { - if l[names.ColExclude] != "" { + if l.Exclude() { continue } - if l[names.ColProviderPackageActual] == "" && l[names.ColProviderPackageCorrect] == "" { + if l.ProviderPackageActual() == "" && l.ProviderPackageCorrect() == "" { continue } - p := l[names.ColProviderPackageCorrect] + p := l.ProviderPackageCorrect() - if l[names.ColProviderPackageActual] != "" { - p = l[names.ColProviderPackageActual] + if l.ProviderPackageActual() != "" { + p = l.ProviderPackageActual() } if _, err := os.Stat(fmt.Sprintf("../../service/%s", p)); err != nil || errors.Is(err, fs.ErrNotExist) { @@ -78,7 +77,7 @@ func main() { sd := ServiceDatum{ ProviderPackage: p, - HumanFriendly: l[names.ColHumanFriendly], + HumanFriendly: l.HumanFriendly(), } serviceConfig, ok := serviceConfigs[p] if ok { diff --git a/internal/provider/provider.go b/internal/provider/provider.go index 58adbcd531a..b0523543fcf 100644 --- a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -903,7 +903,7 @@ func expandEndpoints(_ context.Context, tfList []interface{}) (map[string]string continue } - envVar := names.EnvVar(pkg) + envVar := names.TfAwsEnvVar(pkg) if envVar != "" { if v := os.Getenv(envVar); v != "" { endpoints[pkg] = v diff --git a/names/data/read.go b/names/data/read.go index 953fa9bfd27..778c1548d24 100644 --- a/names/data/read.go +++ b/names/data/read.go @@ -8,10 +8,118 @@ import ( _ "embed" "encoding/csv" "io" + "strings" ) type ServiceRecord []string 
+func (sr ServiceRecord) AWSCLIV2Command() string { + return sr[colAWSCLIV2Command] +} + +func (sr ServiceRecord) AWSCLIV2CommandNoDashes() string { + return sr[colAWSCLIV2CommandNoDashes] +} + +func (sr ServiceRecord) GoV1Package() string { + return sr[colGoV1Package] +} + +func (sr ServiceRecord) GoV2Package() string { + return sr[colGoV2Package] +} + +func (sr ServiceRecord) ProviderPackageActual() string { + return sr[colProviderPackageActual] +} + +func (sr ServiceRecord) ProviderPackageCorrect() string { + return sr[colProviderPackageCorrect] +} + +func (sr ServiceRecord) SplitPackageRealPackage() string { + return sr[colSplitPackageRealPackage] +} + +func (sr ServiceRecord) Aliases() []string { + if sr[colAliases] == "" { + return nil + } + return strings.Split(sr[colAliases], ";") +} + +func (sr ServiceRecord) ProviderNameUpper() string { + return sr[colProviderNameUpper] +} + +func (sr ServiceRecord) GoV1ClientTypeName() string { + return sr[colGoV1ClientTypeName] +} + +func (sr ServiceRecord) SkipClientGenerate() bool { + return sr[colSkipClientGenerate] != "" +} + +func (sr ServiceRecord) ClientSDKV1() string { + return sr[colClientSDKV1] +} + +func (sr ServiceRecord) ClientSDKV2() string { + return sr[colClientSDKV2] +} + +func (sr ServiceRecord) ResourcePrefixActual() string { + return sr[colResourcePrefixActual] +} + +func (sr ServiceRecord) ResourcePrefixCorrect() string { + return sr[colResourcePrefixCorrect] +} + +func (sr ServiceRecord) FilePrefix() string { + return sr[colFilePrefix] +} + +func (sr ServiceRecord) DocPrefix() string { + return sr[colDocPrefix] +} + +func (sr ServiceRecord) HumanFriendly() string { + return sr[colHumanFriendly] +} + +func (sr ServiceRecord) Brand() string { + return sr[colBrand] +} + +func (sr ServiceRecord) Exclude() bool { + return sr[colExclude] != "" +} + +func (sr ServiceRecord) NotImplemented() bool { + return sr[colNotImplemented] != "" +} + +func (sr ServiceRecord) EndpointOnly() bool { + return 
sr[colEndpointOnly] != "" +} + +func (sr ServiceRecord) AllowedSubcategory() string { + return sr[colAllowedSubcategory] +} + +func (sr ServiceRecord) DeprecatedEnvVar() string { + return sr[colDeprecatedEnvVar] +} + +func (sr ServiceRecord) TfAwsEnvVar() string { + return sr[colTfAwsEnvVar] +} + +func (sr ServiceRecord) Note() string { + return sr[colNote] +} + func ReadAllServiceData() (results []ServiceRecord, err error) { reader := csv.NewReader(bytes.NewReader(namesData)) // reader.ReuseRecord = true @@ -35,3 +143,32 @@ func ReadAllServiceData() (results []ServiceRecord, err error) { //go:embed names_data.csv var namesData []byte + +const ( + colAWSCLIV2Command = iota + colAWSCLIV2CommandNoDashes + colGoV1Package + colGoV2Package + colProviderPackageActual + colProviderPackageCorrect + colSplitPackageRealPackage + colAliases + colProviderNameUpper + colGoV1ClientTypeName + colSkipClientGenerate + colClientSDKV1 + colClientSDKV2 + colResourcePrefixActual + colResourcePrefixCorrect + colFilePrefix + colDocPrefix + colHumanFriendly + colBrand + colExclude // If set, the service is completely ignored + colNotImplemented // If set, the service will be included in, e.g. 
labels, but not have a service client + colEndpointOnly // If set, the service is included in list of endpoints + colAllowedSubcategory + colDeprecatedEnvVar // Deprecated `AWS__ENDPOINT` envvar defined for some services + colTfAwsEnvVar // `TF_AWS__ENDPOINT` envvar defined for some services + colNote +) diff --git a/names/names.go b/names/names.go index 1993da0cde5..7b9f5f74231 100644 --- a/names/names.go +++ b/names/names.go @@ -18,7 +18,6 @@ package names import ( "fmt" "log" - "strings" "github.com/hashicorp/terraform-provider-aws/names/data" "golang.org/x/exp/slices" @@ -121,12 +120,12 @@ type ServiceDatum struct { Brand string DeprecatedEnvVar string EndpointOnly bool - EnvVar string GoV1ClientTypeName string GoV1Package string GoV2Package string HumanFriendly string ProviderNameUpper string + TfAwsEnvVar string } // serviceData key is the AWS provider service package @@ -151,40 +150,40 @@ func readCSVIntoServiceData() error { } for _, l := range d { - if l[ColExclude] != "" { + if l.Exclude() { continue } - if l[ColNotImplemented] != "" && l[ColEndpointOnly] == "" { + if l.NotImplemented() && !l.EndpointOnly() { continue } - if l[ColProviderPackageActual] == "" && l[ColProviderPackageCorrect] == "" { + if l.ProviderPackageActual() == "" && l.ProviderPackageCorrect() == "" { continue } - p := l[ColProviderPackageCorrect] + p := l.ProviderPackageCorrect() - if l[ColProviderPackageActual] != "" { - p = l[ColProviderPackageActual] + if l.ProviderPackageActual() != "" { + p = l.ProviderPackageActual() } serviceData[p] = &ServiceDatum{ - Brand: l[ColBrand], - DeprecatedEnvVar: l[ColDeprecatedEnvVar], - EndpointOnly: l[ColEndpointOnly] != "", - EnvVar: l[ColTfAwsEnvVar], - GoV1ClientTypeName: l[ColGoV1ClientTypeName], - GoV1Package: l[ColGoV1Package], - GoV2Package: l[ColGoV2Package], - HumanFriendly: l[ColHumanFriendly], - ProviderNameUpper: l[ColProviderNameUpper], + Brand: l.Brand(), + DeprecatedEnvVar: l.DeprecatedEnvVar(), + EndpointOnly: l.EndpointOnly(), + 
GoV1ClientTypeName: l.GoV1ClientTypeName(), + GoV1Package: l.GoV1Package(), + GoV2Package: l.GoV2Package(), + HumanFriendly: l.HumanFriendly(), + ProviderNameUpper: l.ProviderNameUpper(), + TfAwsEnvVar: l.TfAwsEnvVar(), } a := []string{p} - if l[ColAliases] != "" { - a = append(a, strings.Split(l[ColAliases], ";")...) + if len(l.Aliases()) > 0 { + a = append(a, l.Aliases()...) } serviceData[p].Aliases = a @@ -287,9 +286,9 @@ func DeprecatedEnvVar(service string) string { return "" } -func EnvVar(service string) string { +func TfAwsEnvVar(service string) string { if v, ok := serviceData[service]; ok { - return v.EnvVar + return v.TfAwsEnvVar } return "" From 1495f28ee47d24c7b9a8015094f2199f968a6acd Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 19 Dec 2023 17:20:42 -0800 Subject: [PATCH 356/438] Removes `continue` when package actual and package correct are both empty. This is checked in `checknames` --- internal/generate/awsclient/main.go | 4 ---- internal/generate/issuelabels/main.go | 4 ---- internal/generate/prlabels/main.go | 4 ---- internal/generate/servicelabels/main.go | 4 ---- internal/generate/servicepackage/main.go | 4 ---- internal/generate/servicepackages/main.go | 4 ---- internal/generate/servicesemgrep/main.go | 4 ---- internal/generate/sweeperregistration/main.go | 4 ---- internal/generate/teamcity/services.go | 4 ---- names/names.go | 4 ---- 10 files changed, 40 deletions(-) diff --git a/internal/generate/awsclient/main.go b/internal/generate/awsclient/main.go index b5e7a70faa5..135930e267e 100644 --- a/internal/generate/awsclient/main.go +++ b/internal/generate/awsclient/main.go @@ -51,10 +51,6 @@ func main() { continue } - if l.ProviderPackageActual() == "" && l.ProviderPackageCorrect() == "" { - continue - } - s := ServiceDatum{ ProviderNameUpper: l.ProviderNameUpper(), GoV1Package: l.GoV1Package(), diff --git a/internal/generate/issuelabels/main.go b/internal/generate/issuelabels/main.go index 4ac4e7b4d76..caeb8b4fa3c 100644 --- 
a/internal/generate/issuelabels/main.go +++ b/internal/generate/issuelabels/main.go @@ -45,10 +45,6 @@ func main() { continue } - if l.ProviderPackageActual() == "" && l.ProviderPackageCorrect() == "" { - continue - } - if l.ResourcePrefixActual() == "" && l.ResourcePrefixCorrect() == "" { continue } diff --git a/internal/generate/prlabels/main.go b/internal/generate/prlabels/main.go index d506ccb2148..6d3fceccbc3 100644 --- a/internal/generate/prlabels/main.go +++ b/internal/generate/prlabels/main.go @@ -47,10 +47,6 @@ func main() { continue } - if l.ProviderPackageActual() == "" && l.ProviderPackageCorrect() == "" { - continue - } - p := l.ProviderPackageCorrect() if l.ProviderPackageActual() != "" { diff --git a/internal/generate/servicelabels/main.go b/internal/generate/servicelabels/main.go index 5a77506efc6..77223b5fdef 100644 --- a/internal/generate/servicelabels/main.go +++ b/internal/generate/servicelabels/main.go @@ -44,10 +44,6 @@ func main() { continue } - if l.ProviderPackageActual() == "" && l.ProviderPackageCorrect() == "" { - continue - } - p := l.ProviderPackageCorrect() if l.ProviderPackageActual() != "" { diff --git a/internal/generate/servicepackage/main.go b/internal/generate/servicepackage/main.go index 1e36076a750..f231591fa83 100644 --- a/internal/generate/servicepackage/main.go +++ b/internal/generate/servicepackage/main.go @@ -40,10 +40,6 @@ func main() { g.Infof("Generating internal/service/%s/%s", servicePackage, filename) for _, l := range data { - if l.ProviderPackageActual() == "" && l.ProviderPackageCorrect() == "" { - continue - } - // See internal/generate/namesconsts/main.go. 
p := l.ProviderPackageCorrect() diff --git a/internal/generate/servicepackages/main.go b/internal/generate/servicepackages/main.go index dfe56c5135c..1c88ee82330 100644 --- a/internal/generate/servicepackages/main.go +++ b/internal/generate/servicepackages/main.go @@ -43,10 +43,6 @@ func main() { } for _, l := range data { - if l.ProviderPackageActual() == "" && l.ProviderPackageCorrect() == "" { - continue - } - // See internal/generate/namesconsts/main.go. p := l.ProviderPackageCorrect() diff --git a/internal/generate/servicesemgrep/main.go b/internal/generate/servicesemgrep/main.go index 5f15f388c90..2cff690f48a 100644 --- a/internal/generate/servicesemgrep/main.go +++ b/internal/generate/servicesemgrep/main.go @@ -113,10 +113,6 @@ func main() { continue } - if l.ProviderPackageActual() == "" && l.ProviderPackageCorrect() == "" { - continue - } - p := l.ProviderPackageCorrect() if l.ProviderPackageActual() != "" { diff --git a/internal/generate/sweeperregistration/main.go b/internal/generate/sweeperregistration/main.go index c2e2cb3e89e..db7a8bd3603 100644 --- a/internal/generate/sweeperregistration/main.go +++ b/internal/generate/sweeperregistration/main.go @@ -52,10 +52,6 @@ func main() { continue } - if l.ProviderPackageActual() == "" && l.ProviderPackageCorrect() == "" { - continue - } - p := l.ProviderPackageCorrect() if l.ProviderPackageActual() != "" { diff --git a/internal/generate/teamcity/services.go b/internal/generate/teamcity/services.go index 672328d87d4..847a685df22 100644 --- a/internal/generate/teamcity/services.go +++ b/internal/generate/teamcity/services.go @@ -61,10 +61,6 @@ func main() { continue } - if l.ProviderPackageActual() == "" && l.ProviderPackageCorrect() == "" { - continue - } - p := l.ProviderPackageCorrect() if l.ProviderPackageActual() != "" { diff --git a/names/names.go b/names/names.go index 7b9f5f74231..7783d8873e4 100644 --- a/names/names.go +++ b/names/names.go @@ -158,10 +158,6 @@ func readCSVIntoServiceData() error { 
continue } - if l.ProviderPackageActual() == "" && l.ProviderPackageCorrect() == "" { - continue - } - p := l.ProviderPackageCorrect() if l.ProviderPackageActual() != "" { From 2c6c583e1988f0bb5068eaa0d3dd873d37761acf Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 19 Dec 2023 14:24:27 -0800 Subject: [PATCH 357/438] Replaces use of `ProviderPackageCorrect` and `ProviderPackageActual` together with `ProviderPackage`, to consolidate logic --- internal/generate/issuelabels/main.go | 6 +----- internal/generate/prlabels/main.go | 6 +----- internal/generate/servicelabels/main.go | 6 +----- internal/generate/servicepackage/main.go | 6 +----- internal/generate/servicepackages/main.go | 6 +----- internal/generate/servicesemgrep/main.go | 6 +----- internal/generate/sweeperregistration/main.go | 6 +----- internal/generate/teamcity/services.go | 6 +----- names/data/read.go | 8 ++++++++ names/names.go | 6 +----- 10 files changed, 17 insertions(+), 45 deletions(-) diff --git a/internal/generate/issuelabels/main.go b/internal/generate/issuelabels/main.go index caeb8b4fa3c..53b5b0175c2 100644 --- a/internal/generate/issuelabels/main.go +++ b/internal/generate/issuelabels/main.go @@ -49,11 +49,7 @@ func main() { continue } - p := l.ProviderPackageCorrect() - - if l.ProviderPackageActual() != "" { - p = l.ProviderPackageActual() - } + p := l.ProviderPackage() rp := l.ResourcePrefixCorrect() diff --git a/internal/generate/prlabels/main.go b/internal/generate/prlabels/main.go index 6d3fceccbc3..de064060493 100644 --- a/internal/generate/prlabels/main.go +++ b/internal/generate/prlabels/main.go @@ -47,11 +47,7 @@ func main() { continue } - p := l.ProviderPackageCorrect() - - if l.ProviderPackageActual() != "" { - p = l.ProviderPackageActual() - } + p := l.ProviderPackage() ap := p diff --git a/internal/generate/servicelabels/main.go b/internal/generate/servicelabels/main.go index 77223b5fdef..d1e0e0c1700 100644 --- a/internal/generate/servicelabels/main.go +++ 
b/internal/generate/servicelabels/main.go @@ -44,11 +44,7 @@ func main() { continue } - p := l.ProviderPackageCorrect() - - if l.ProviderPackageActual() != "" { - p = l.ProviderPackageActual() - } + p := l.ProviderPackage() s := ServiceDatum{ ProviderPackage: p, diff --git a/internal/generate/servicepackage/main.go b/internal/generate/servicepackage/main.go index f231591fa83..f47abf8761c 100644 --- a/internal/generate/servicepackage/main.go +++ b/internal/generate/servicepackage/main.go @@ -41,11 +41,7 @@ func main() { for _, l := range data { // See internal/generate/namesconsts/main.go. - p := l.ProviderPackageCorrect() - - if l.ProviderPackageActual() != "" { - p = l.ProviderPackageActual() - } + p := l.ProviderPackage() if p != servicePackage { continue diff --git a/internal/generate/servicepackages/main.go b/internal/generate/servicepackages/main.go index 1c88ee82330..278a3581a07 100644 --- a/internal/generate/servicepackages/main.go +++ b/internal/generate/servicepackages/main.go @@ -44,11 +44,7 @@ func main() { for _, l := range data { // See internal/generate/namesconsts/main.go. 
- p := l.ProviderPackageCorrect() - - if l.ProviderPackageActual() != "" { - p = l.ProviderPackageActual() - } + p := l.ProviderPackage() spdFile := fmt.Sprintf("../service/%s/service_package_gen.go", p) diff --git a/internal/generate/servicesemgrep/main.go b/internal/generate/servicesemgrep/main.go index 2cff690f48a..a58f2c53d95 100644 --- a/internal/generate/servicesemgrep/main.go +++ b/internal/generate/servicesemgrep/main.go @@ -113,11 +113,7 @@ func main() { continue } - p := l.ProviderPackageCorrect() - - if l.ProviderPackageActual() != "" { - p = l.ProviderPackageActual() - } + p := l.ProviderPackage() rp := p diff --git a/internal/generate/sweeperregistration/main.go b/internal/generate/sweeperregistration/main.go index db7a8bd3603..e966783d025 100644 --- a/internal/generate/sweeperregistration/main.go +++ b/internal/generate/sweeperregistration/main.go @@ -52,11 +52,7 @@ func main() { continue } - p := l.ProviderPackageCorrect() - - if l.ProviderPackageActual() != "" { - p = l.ProviderPackageActual() - } + p := l.ProviderPackage() if _, err := os.Stat(fmt.Sprintf("../service/%s", p)); err != nil || errors.Is(err, fs.ErrNotExist) { continue diff --git a/internal/generate/teamcity/services.go b/internal/generate/teamcity/services.go index 847a685df22..971707a622a 100644 --- a/internal/generate/teamcity/services.go +++ b/internal/generate/teamcity/services.go @@ -61,11 +61,7 @@ func main() { continue } - p := l.ProviderPackageCorrect() - - if l.ProviderPackageActual() != "" { - p = l.ProviderPackageActual() - } + p := l.ProviderPackage() if _, err := os.Stat(fmt.Sprintf("../../service/%s", p)); err != nil || errors.Is(err, fs.ErrNotExist) { continue diff --git a/names/data/read.go b/names/data/read.go index 778c1548d24..f1cb5bbdc89 100644 --- a/names/data/read.go +++ b/names/data/read.go @@ -29,6 +29,14 @@ func (sr ServiceRecord) GoV2Package() string { return sr[colGoV2Package] } +func (sr ServiceRecord) ProviderPackage() string { + pkg := 
sr.ProviderPackageCorrect() + if sr.ProviderPackageActual() != "" { + pkg = sr.ProviderPackageActual() + } + return pkg +} + func (sr ServiceRecord) ProviderPackageActual() string { return sr[colProviderPackageActual] } diff --git a/names/names.go b/names/names.go index 7783d8873e4..7e8211d8e1b 100644 --- a/names/names.go +++ b/names/names.go @@ -158,11 +158,7 @@ func readCSVIntoServiceData() error { continue } - p := l.ProviderPackageCorrect() - - if l.ProviderPackageActual() != "" { - p = l.ProviderPackageActual() - } + p := l.ProviderPackage() serviceData[p] = &ServiceDatum{ Brand: l.Brand(), From 259bfdf6ec9eb57c23f1dd544caa2877a3729632 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 19 Dec 2023 14:45:58 -0800 Subject: [PATCH 358/438] Replaces use of `ResourcePrefixCorrect` and `ResourcePrefixActual` together with `ResourcePrefix`, to consolidate logic --- internal/generate/checknames/main.go | 6 +++++- internal/generate/issuelabels/main.go | 10 +--------- names/data/read.go | 8 ++++++++ 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/internal/generate/checknames/main.go b/internal/generate/checknames/main.go index 897e8994d69..1a57315526d 100644 --- a/internal/generate/checknames/main.go +++ b/internal/generate/checknames/main.go @@ -110,6 +110,10 @@ func main() { log.Fatalf("in service data, line %d, for service %s, SDKVersion is set to 2 so GoV2Package cannot be blank", i, l.HumanFriendly()) } + if l.ResourcePrefixCorrect() == "" && !l.Exclude() { + log.Fatalf("in service data, line %d, for service %s, ResourcePrefixCorrect must have a value if Exclude is blank", i, l.HumanFriendly()) + } + if l.ResourcePrefixCorrect() != "" && l.ResourcePrefixCorrect() != fmt.Sprintf("aws_%s_", l.ProviderPackageCorrect()) { log.Fatalf("in service data, line %d, for service %s, ResourcePrefixCorrect should be aws__, where is ProviderPackageCorrect", i, l.HumanFriendly()) } @@ -183,7 +187,7 @@ func main() { allChecks++ } - fmt.Printf(" Performed %d 
checks on names_data.csv, 0 errors.\n", (allChecks * 37)) + fmt.Printf(" Performed %d checks on service data, 0 errors.\n", (allChecks * 38)) var fileErrs bool diff --git a/internal/generate/issuelabels/main.go b/internal/generate/issuelabels/main.go index 53b5b0175c2..0b9299539f0 100644 --- a/internal/generate/issuelabels/main.go +++ b/internal/generate/issuelabels/main.go @@ -45,17 +45,9 @@ func main() { continue } - if l.ResourcePrefixActual() == "" && l.ResourcePrefixCorrect() == "" { - continue - } - p := l.ProviderPackage() - rp := l.ResourcePrefixCorrect() - - if l.ResourcePrefixActual() != "" { - rp = l.ResourcePrefixActual() - } + rp := l.ResourcePrefix() s := ServiceDatum{ ProviderPackage: p, diff --git a/names/data/read.go b/names/data/read.go index f1cb5bbdc89..4b3cf497759 100644 --- a/names/data/read.go +++ b/names/data/read.go @@ -76,6 +76,14 @@ func (sr ServiceRecord) ClientSDKV2() string { return sr[colClientSDKV2] } +func (sr ServiceRecord) ResourcePrefix() string { + prefix := sr.ResourcePrefixCorrect() + if sr.ResourcePrefixActual() != "" { + prefix = sr.ResourcePrefixActual() + } + return prefix +} + func (sr ServiceRecord) ResourcePrefixActual() string { return sr[colResourcePrefixActual] } From 188136a44d54714476a366e1c5cddf0aca86942a Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Tue, 19 Dec 2023 15:04:53 -0800 Subject: [PATCH 359/438] Returns slice of strings from `DocPrefix` --- internal/generate/checknames/main.go | 6 +++--- internal/generate/prlabels/main.go | 2 +- names/data/read.go | 7 +++++-- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/internal/generate/checknames/main.go b/internal/generate/checknames/main.go index 1a57315526d..8bc87efa168 100644 --- a/internal/generate/checknames/main.go +++ b/internal/generate/checknames/main.go @@ -134,7 +134,7 @@ func main() { log.Fatalf("in service data, line %d, for service %s, Brand must be AWS, Amazon, or blank; found %s", l.HumanFriendly(), i, l.Brand()) } - if 
(!l.Exclude() || (l.Exclude() && l.AllowedSubcategory() != "")) && l.DocPrefix() == "" { + if (!l.Exclude() || (l.Exclude() && l.AllowedSubcategory() != "")) && len(l.DocPrefix()) == 0 { log.Fatalf("in service data, line %d, for service %s, DocPrefix cannot be blank unless Exclude is non-blank and AllowedSubcategory is blank", i, l.HumanFriendly()) } @@ -149,7 +149,7 @@ func main() { checkAllLowercase(i, l.HumanFriendly(), "ResourcePrefixActual", l.ResourcePrefixActual()) checkAllLowercase(i, l.HumanFriendly(), "ResourcePrefixCorrect", l.ResourcePrefixCorrect()) checkAllLowercase(i, l.HumanFriendly(), "FilePrefix", l.FilePrefix()) - checkAllLowercase(i, l.HumanFriendly(), "DocPrefix", l.DocPrefix()) + checkAllLowercase(i, l.HumanFriendly(), "DocPrefix", l.DocPrefix()...) checkNotAllLowercase(i, l.HumanFriendly(), "ProviderNameUpper", l.ProviderNameUpper()) checkNotAllLowercase(i, l.HumanFriendly(), "GoV1ClientTypeName", l.GoV1ClientTypeName()) @@ -181,7 +181,7 @@ func main() { docPrefixes = append(docPrefixes, DocPrefix{ HumanFriendly: l.HumanFriendly(), - DocPrefixRegex: strings.Split(l.DocPrefix(), ";"), + DocPrefixRegex: l.DocPrefix(), ResourceRegex: rre, }) diff --git a/internal/generate/prlabels/main.go b/internal/generate/prlabels/main.go index de064060493..64210d0be4b 100644 --- a/internal/generate/prlabels/main.go +++ b/internal/generate/prlabels/main.go @@ -59,7 +59,7 @@ func main() { ProviderPackage: p, ActualPackage: ap, FilePrefix: l.FilePrefix(), - DocPrefixes: strings.Split(l.DocPrefix(), ";"), + DocPrefixes: l.DocPrefix(), } td.Services = append(td.Services, s) diff --git a/names/data/read.go b/names/data/read.go index 4b3cf497759..b702ca71656 100644 --- a/names/data/read.go +++ b/names/data/read.go @@ -96,8 +96,11 @@ func (sr ServiceRecord) FilePrefix() string { return sr[colFilePrefix] } -func (sr ServiceRecord) DocPrefix() string { - return sr[colDocPrefix] +func (sr ServiceRecord) DocPrefix() []string { + if sr[colDocPrefix] == "" { + return nil 
+ } + return strings.Split(sr[colDocPrefix], ";") } func (sr ServiceRecord) HumanFriendly() string { From ac29e50ed62589c543c73c5cb89f7b3ae01c6496 Mon Sep 17 00:00:00 2001 From: breathingdust Date: Wed, 20 Dec 2023 09:03:53 +0000 Subject: [PATCH 360/438] docs: update resource counts --- website/docs/index.html.markdown | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/index.html.markdown b/website/docs/index.html.markdown index 41aa4bb473c..175cb31362e 100644 --- a/website/docs/index.html.markdown +++ b/website/docs/index.html.markdown @@ -11,7 +11,7 @@ Use the Amazon Web Services (AWS) provider to interact with the many resources supported by AWS. You must configure the provider with the proper credentials before you can use it. -Use the navigation to the left to read about the available resources. There are currently 1300 resources and 533 data sources available in the provider. +Use the navigation to the left to read about the available resources. There are currently 1304 resources and 536 data sources available in the provider. To learn the basics of Terraform using this provider, follow the hands-on [get started tutorials](https://learn.hashicorp.com/tutorials/terraform/infrastructure-as-code?in=terraform/aws-get-started&utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS). 
Interact with AWS services, From c032afeb5c0449d71949d9d91837aa3e27550207 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 20 Dec 2023 12:52:32 -0500 Subject: [PATCH 361/438] autoflex: Add StringEnum capability for MapBlockKeys --- internal/framework/flex/autoflex_test.go | 27 +++++++++++++++--------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git a/internal/framework/flex/autoflex_test.go b/internal/framework/flex/autoflex_test.go index b30fac0c715..894489f78c9 100644 --- a/internal/framework/flex/autoflex_test.go +++ b/internal/framework/flex/autoflex_test.go @@ -299,7 +299,10 @@ type TestFlexTF18 struct { } type TestFlexMapBlockKeyTF01 struct { - BlockMap fwtypes.ListNestedObjectValueOf[TestFlexMapBlockKeyTF02] `tfsdk:"block_map"` + MapBlock fwtypes.ListNestedObjectValueOf[TestFlexMapBlockKeyTF02] `tfsdk:"map_block"` +} +type TestFlexMapBlockKeyAWS01 struct { + MapBlock map[string]TestFlexMapBlockKeyAWS02 } type TestFlexMapBlockKeyTF02 struct { @@ -307,20 +310,24 @@ type TestFlexMapBlockKeyTF02 struct { Attr1 types.String `tfsdk:"attr1"` Attr2 types.String `tfsdk:"attr2"` } +type TestFlexMapBlockKeyAWS02 struct { + Attr1 string + Attr2 string +} type TestFlexMapBlockKeyTF03 struct { - BlockMap fwtypes.SetNestedObjectValueOf[TestFlexMapBlockKeyTF02] `tfsdk:"block_map"` + MapBlock fwtypes.SetNestedObjectValueOf[TestFlexMapBlockKeyTF02] `tfsdk:"map_block"` } -type TestFlexMapBlockKeyAWS01 struct { - BlockMap map[string]TestFlexMapBlockKeyAWS02 +type TestFlexMapBlockKeyAWS03 struct { + MapBlock map[string]*TestFlexMapBlockKeyAWS02 } -type TestFlexMapBlockKeyAWS02 struct { - Attr1 string - Attr2 string +type TestFlexMapBlockKeyTF04 struct { + MapBlock fwtypes.ListNestedObjectValueOf[TestFlexMapBlockKeyTF05] `tfsdk:"map_block"` } - -type TestFlexMapBlockKeyAWS03 struct { - BlockMap map[string]*TestFlexMapBlockKeyAWS02 +type TestFlexMapBlockKeyTF05 struct { + MapBlockKey fwtypes.StringEnum[TestEnum] `tfsdk:"map_block_key"` + Attr1 types.String 
`tfsdk:"attr1"` + Attr2 types.String `tfsdk:"attr2"` } From b8b6776aa49a9f2c31891b7c6162df27999a2d7c Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 20 Dec 2023 12:54:07 -0500 Subject: [PATCH 362/438] fw/types: Add way to get value from (zero) value --- internal/framework/types/string_enum.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/internal/framework/types/string_enum.go b/internal/framework/types/string_enum.go index 41d52518eb6..146d0c165e9 100644 --- a/internal/framework/types/string_enum.go +++ b/internal/framework/types/string_enum.go @@ -174,3 +174,9 @@ func (v StringEnum[T]) Type(context.Context) attr.Type { func (v StringEnum[T]) ValueEnum() T { return T(v.ValueString()) } + +// StringEnumValue is useful if you have a zero value StringEnum but need a +// way to get a non-zero value such as when flattening. +func (v StringEnum[T]) StringEnumValue(value string) StringEnum[T] { + return StringEnum[T]{StringValue: basetypes.NewStringValue(value)} +} From 97d818e6574895285575fe1305487e50675d07d7 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 20 Dec 2023 12:55:45 -0500 Subject: [PATCH 363/438] autoflex: Expand can handle StringEnum MapBlockKeys --- internal/framework/flex/auto_expand.go | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/internal/framework/flex/auto_expand.go b/internal/framework/flex/auto_expand.go index ad2f799470e..73f10cf5ff5 100644 --- a/internal/framework/flex/auto_expand.go +++ b/internal/framework/flex/auto_expand.go @@ -763,9 +763,25 @@ func blockKeyMap(from any) (reflect.Value, diag.Diagnostics) { // go from StringValue to string if field.Name == MapBlockKey { - if v, ok := valFrom.Field(i).Interface().(basetypes.StringValue); ok { + fieldVal := valFrom.Field(i) + + if v, ok := fieldVal.Interface().(basetypes.StringValue); ok { return reflect.ValueOf(v.ValueString()), diags } + + // this handles things like StringEnum which has a ValueString method but is tricky to get a 
generic instantiation of + fieldType := fieldVal.Type() + method, found := fieldType.MethodByName("ValueString") + if found { + result := fieldType.Method(method.Index).Func.Call([]reflect.Value{fieldVal}) + if len(result) > 0 { + return result[0], diags + } + } + + // this is not ideal but perhaps better than a panic? + // return reflect.ValueOf(fmt.Sprintf("%s", valFrom.Field(i))), diags + return valFrom.Field(i), diags } } From 58b40609d205ce6673403dbfdb3017469e07335a Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 20 Dec 2023 12:56:56 -0500 Subject: [PATCH 364/438] autoflex: Test Expand can handle StringEnum MapBlockKeys --- internal/framework/flex/auto_expand_test.go | 46 +++++++++++++++++---- 1 file changed, 38 insertions(+), 8 deletions(-) diff --git a/internal/framework/flex/auto_expand_test.go b/internal/framework/flex/auto_expand_test.go index b6ae8721391..55fc1505e07 100644 --- a/internal/framework/flex/auto_expand_test.go +++ b/internal/framework/flex/auto_expand_test.go @@ -578,7 +578,7 @@ func TestExpandGeneric(t *testing.T) { { TestName: "map block key list", Source: &TestFlexMapBlockKeyTF01{ - BlockMap: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexMapBlockKeyTF02](ctx, []TestFlexMapBlockKeyTF02{ + MapBlock: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexMapBlockKeyTF02](ctx, []TestFlexMapBlockKeyTF02{ { MapBlockKey: types.StringValue("x"), Attr1: types.StringValue("a"), @@ -593,7 +593,7 @@ func TestExpandGeneric(t *testing.T) { }, Target: &TestFlexMapBlockKeyAWS01{}, WantTarget: &TestFlexMapBlockKeyAWS01{ - BlockMap: map[string]TestFlexMapBlockKeyAWS02{ + MapBlock: map[string]TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", @@ -608,7 +608,7 @@ func TestExpandGeneric(t *testing.T) { { TestName: "map block key set", Source: &TestFlexMapBlockKeyTF03{ - BlockMap: fwtypes.NewSetNestedObjectValueOfValueSlice[TestFlexMapBlockKeyTF02](ctx, []TestFlexMapBlockKeyTF02{ + MapBlock: 
fwtypes.NewSetNestedObjectValueOfValueSlice[TestFlexMapBlockKeyTF02](ctx, []TestFlexMapBlockKeyTF02{ { MapBlockKey: types.StringValue("x"), Attr1: types.StringValue("a"), @@ -623,7 +623,7 @@ func TestExpandGeneric(t *testing.T) { }, Target: &TestFlexMapBlockKeyAWS01{}, WantTarget: &TestFlexMapBlockKeyAWS01{ - BlockMap: map[string]TestFlexMapBlockKeyAWS02{ + MapBlock: map[string]TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", @@ -638,7 +638,7 @@ func TestExpandGeneric(t *testing.T) { { TestName: "map block key ptr source", Source: &TestFlexMapBlockKeyTF01{ - BlockMap: fwtypes.NewListNestedObjectValueOfSlice(ctx, []*TestFlexMapBlockKeyTF02{ + MapBlock: fwtypes.NewListNestedObjectValueOfSlice(ctx, []*TestFlexMapBlockKeyTF02{ { MapBlockKey: types.StringValue("x"), Attr1: types.StringValue("a"), @@ -653,7 +653,7 @@ func TestExpandGeneric(t *testing.T) { }, Target: &TestFlexMapBlockKeyAWS01{}, WantTarget: &TestFlexMapBlockKeyAWS01{ - BlockMap: map[string]TestFlexMapBlockKeyAWS02{ + MapBlock: map[string]TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", @@ -668,7 +668,7 @@ func TestExpandGeneric(t *testing.T) { { TestName: "map block key ptr both", Source: &TestFlexMapBlockKeyTF01{ - BlockMap: fwtypes.NewListNestedObjectValueOfSlice(ctx, []*TestFlexMapBlockKeyTF02{ + MapBlock: fwtypes.NewListNestedObjectValueOfSlice(ctx, []*TestFlexMapBlockKeyTF02{ { MapBlockKey: types.StringValue("x"), Attr1: types.StringValue("a"), @@ -683,7 +683,7 @@ func TestExpandGeneric(t *testing.T) { }, Target: &TestFlexMapBlockKeyAWS03{}, WantTarget: &TestFlexMapBlockKeyAWS03{ - BlockMap: map[string]*TestFlexMapBlockKeyAWS02{ + MapBlock: map[string]*TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", @@ -695,6 +695,36 @@ func TestExpandGeneric(t *testing.T) { }, }, }, + { + TestName: "map block enum key", + Source: &TestFlexMapBlockKeyTF04{ + MapBlock: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexMapBlockKeyTF05](ctx, []TestFlexMapBlockKeyTF05{ + { + MapBlockKey: 
fwtypes.StringEnumValue(TestEnumList), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + { + MapBlockKey: fwtypes.StringEnumValue(TestEnumScalar), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), + }, + }), + }, + Target: &TestFlexMapBlockKeyAWS01{}, + WantTarget: &TestFlexMapBlockKeyAWS01{ + MapBlock: map[string]TestFlexMapBlockKeyAWS02{ + string(TestEnumList): { + Attr1: "a", + Attr2: "b", + }, + string(TestEnumScalar): { + Attr1: "c", + Attr2: "d", + }, + }, + }, + }, { TestName: "complex nesting", Source: &TestFlexComplexNestTF01{ From cef8ea05bd8768507d77b161e49626a5c928dbc4 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 20 Dec 2023 12:58:22 -0500 Subject: [PATCH 365/438] autoflex: Flatten can handle StringEnum MapBlockKeys --- internal/framework/flex/auto_flatten.go | 89 +++++++------------------ 1 file changed, 25 insertions(+), 64 deletions(-) diff --git a/internal/framework/flex/auto_flatten.go b/internal/framework/flex/auto_flatten.go index f29ee88ccc6..5acd8492679 100644 --- a/internal/framework/flex/auto_flatten.go +++ b/internal/framework/flex/auto_flatten.go @@ -741,7 +741,7 @@ func (flattener autoFlattener) structMapToObjectList(ctx context.Context, vFrom return diags } - d = blockKeyMapSet(target, key.String()) + d = blockKeyMapSet(target, key) diags.Append(d...) t.Index(i).Set(reflect.ValueOf(target)) @@ -759,67 +759,6 @@ func (flattener autoFlattener) structMapToObjectList(ctx context.Context, vFrom return diags } -/* -func (flattener autoFlattener) structMapToObjectSet(ctx context.Context, vFrom reflect.Value, tTo fwtypes.NestedObjectType, vTo reflect.Value) diag.Diagnostics { - var diags diag.Diagnostics - - if vFrom.IsNil() { - val, d := tTo.NullValue(ctx) - diags.Append(d...) - if diags.HasError() { - return diags - } - - vTo.Set(reflect.ValueOf(val)) - return diags - } - - n := vFrom.Len() - to, d := tTo.NewObjectSlice(ctx, n, n) - diags.Append(d...) 
- if diags.HasError() { - return diags - } - - t := reflect.ValueOf(to) - - i := 0 - for _, key := range vFrom.MapKeys() { - target, d := tTo.NewObjectPtr(ctx) - diags.Append(d...) - if diags.HasError() { - return diags - } - - fromInterface := vFrom.MapIndex(key).Interface() - if vFrom.MapIndex(key).Kind() == reflect.Ptr { - fromInterface = vFrom.MapIndex(key).Elem().Interface() - } - - diags.Append(autoFlexConvertStruct(ctx, fromInterface, target, flattener)...) - if diags.HasError() { - return diags - } - - d = blockKeyMapSet(target, key.String()) - diags.Append(d...) - - t.Index(i).Set(reflect.ValueOf(target)) - i++ - } - - val, d := tTo.ValueFromObjectSlice(ctx, to) - diags.Append(d...) - if diags.HasError() { - return diags - } - - vTo.Set(reflect.ValueOf(val)) - - return diags -} -*/ - // structToNestedObject copies an AWS API struct value to a compatible Plugin Framework NestedObjectValue value. func (flattener autoFlattener) structToNestedObject(ctx context.Context, vFrom reflect.Value, isNullFrom bool, tTo fwtypes.NestedObjectType, vTo reflect.Value) diag.Diagnostics { var diags diag.Diagnostics @@ -909,7 +848,7 @@ func (flattener autoFlattener) sliceOfStructNestedObject(ctx context.Context, vF } // blockKeyMapSet takes a struct and assigns the value of the `key` -func blockKeyMapSet(to any, key string) diag.Diagnostics { +func blockKeyMapSet(to any, key reflect.Value) diag.Diagnostics { var diags diag.Diagnostics valTo := reflect.ValueOf(to) @@ -933,10 +872,20 @@ func blockKeyMapSet(to any, key string) diag.Diagnostics { } if _, ok := valTo.Field(i).Interface().(basetypes.StringValue); ok { - valTo.Field(i).Set(reflect.ValueOf(basetypes.NewStringValue(key))) + valTo.Field(i).Set(reflect.ValueOf(basetypes.NewStringValue(key.String()))) return diags } + fieldType := valTo.Field(i).Type() + + method, found := fieldType.MethodByName("StringEnumValue") + if found { + result := fieldType.Method(method.Index).Func.Call([]reflect.Value{valTo.Field(i), key}) + if 
len(result) > 0 { + valTo.Field(i).Set(result[0]) + } + } + return diags } @@ -944,3 +893,15 @@ func blockKeyMapSet(to any, key string) diag.Diagnostics { return diags } + +func attemptMapBlockKeySet(value reflect.Value, newValue interface{}) (err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("panic: %v", r) + } + }() + + // Attempt to set the value + value.Set(reflect.ValueOf(newValue)) + return nil +} From e61d5e7a094e57327cd5c5be69cac0b4ec5ddfa6 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 20 Dec 2023 12:59:41 -0500 Subject: [PATCH 366/438] autoflex: Test Flatten can handle StringEnum MapBlockKeys --- internal/framework/flex/auto_flatten_test.go | 46 ++++++++++++++++---- 1 file changed, 38 insertions(+), 8 deletions(-) diff --git a/internal/framework/flex/auto_flatten_test.go b/internal/framework/flex/auto_flatten_test.go index 69fc1151d4a..deca4c23dad 100644 --- a/internal/framework/flex/auto_flatten_test.go +++ b/internal/framework/flex/auto_flatten_test.go @@ -752,7 +752,7 @@ func TestFlattenGeneric(t *testing.T) { { TestName: "map block key list", Source: &TestFlexMapBlockKeyAWS01{ - BlockMap: map[string]TestFlexMapBlockKeyAWS02{ + MapBlock: map[string]TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", @@ -761,7 +761,7 @@ func TestFlattenGeneric(t *testing.T) { }, Target: &TestFlexMapBlockKeyTF01{}, WantTarget: &TestFlexMapBlockKeyTF01{ - BlockMap: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexMapBlockKeyTF02](ctx, []TestFlexMapBlockKeyTF02{ + MapBlock: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexMapBlockKeyTF02](ctx, []TestFlexMapBlockKeyTF02{ { MapBlockKey: types.StringValue("x"), Attr1: types.StringValue("a"), @@ -773,7 +773,7 @@ func TestFlattenGeneric(t *testing.T) { { TestName: "map block key set", Source: &TestFlexMapBlockKeyAWS01{ - BlockMap: map[string]TestFlexMapBlockKeyAWS02{ + MapBlock: map[string]TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", @@ -782,7 +782,7 @@ func 
TestFlattenGeneric(t *testing.T) { }, Target: &TestFlexMapBlockKeyTF03{}, WantTarget: &TestFlexMapBlockKeyTF03{ - BlockMap: fwtypes.NewSetNestedObjectValueOfValueSlice[TestFlexMapBlockKeyTF02](ctx, []TestFlexMapBlockKeyTF02{ + MapBlock: fwtypes.NewSetNestedObjectValueOfValueSlice[TestFlexMapBlockKeyTF02](ctx, []TestFlexMapBlockKeyTF02{ { MapBlockKey: types.StringValue("x"), Attr1: types.StringValue("a"), @@ -794,7 +794,7 @@ func TestFlattenGeneric(t *testing.T) { { TestName: "map block key ptr source", Source: &TestFlexMapBlockKeyAWS03{ - BlockMap: map[string]*TestFlexMapBlockKeyAWS02{ + MapBlock: map[string]*TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", @@ -803,7 +803,7 @@ func TestFlattenGeneric(t *testing.T) { }, Target: &TestFlexMapBlockKeyTF01{}, WantTarget: &TestFlexMapBlockKeyTF01{ - BlockMap: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexMapBlockKeyTF02](ctx, []TestFlexMapBlockKeyTF02{ + MapBlock: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexMapBlockKeyTF02](ctx, []TestFlexMapBlockKeyTF02{ { MapBlockKey: types.StringValue("x"), Attr1: types.StringValue("a"), @@ -815,7 +815,7 @@ func TestFlattenGeneric(t *testing.T) { { TestName: "map block key ptr both", Source: &TestFlexMapBlockKeyAWS03{ - BlockMap: map[string]*TestFlexMapBlockKeyAWS02{ + MapBlock: map[string]*TestFlexMapBlockKeyAWS02{ "x": { Attr1: "a", Attr2: "b", @@ -824,7 +824,7 @@ func TestFlattenGeneric(t *testing.T) { }, Target: &TestFlexMapBlockKeyTF01{}, WantTarget: &TestFlexMapBlockKeyTF01{ - BlockMap: fwtypes.NewListNestedObjectValueOfSlice(ctx, []*TestFlexMapBlockKeyTF02{ + MapBlock: fwtypes.NewListNestedObjectValueOfSlice(ctx, []*TestFlexMapBlockKeyTF02{ { MapBlockKey: types.StringValue("x"), Attr1: types.StringValue("a"), @@ -833,6 +833,36 @@ func TestFlattenGeneric(t *testing.T) { }), }, }, + { + TestName: "map block enum key", + Source: &TestFlexMapBlockKeyAWS01{ + MapBlock: map[string]TestFlexMapBlockKeyAWS02{ + string(TestEnumList): { + Attr1: "a", + Attr2: "b", 
+ }, + string(TestEnumScalar): { + Attr1: "c", + Attr2: "d", + }, + }, + }, + Target: &TestFlexMapBlockKeyTF04{}, + WantTarget: &TestFlexMapBlockKeyTF04{ + MapBlock: fwtypes.NewListNestedObjectValueOfValueSlice[TestFlexMapBlockKeyTF05](ctx, []TestFlexMapBlockKeyTF05{ + { + MapBlockKey: fwtypes.StringEnumValue(TestEnumList), + Attr1: types.StringValue("a"), + Attr2: types.StringValue("b"), + }, + { + MapBlockKey: fwtypes.StringEnumValue(TestEnumScalar), + Attr1: types.StringValue("c"), + Attr2: types.StringValue("d"), + }, + }), + }, + }, { TestName: "complex nesting", Source: &TestFlexComplexNestAWS01{ From f0e497c1dbaf97b8a35b9edc2232ab618c00193e Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 20 Dec 2023 13:00:29 -0500 Subject: [PATCH 367/438] autoflex: Remove unused code --- internal/framework/flex/auto_flatten.go | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/internal/framework/flex/auto_flatten.go b/internal/framework/flex/auto_flatten.go index 5acd8492679..6f7891d2892 100644 --- a/internal/framework/flex/auto_flatten.go +++ b/internal/framework/flex/auto_flatten.go @@ -893,15 +893,3 @@ func blockKeyMapSet(to any, key reflect.Value) diag.Diagnostics { return diags } - -func attemptMapBlockKeySet(value reflect.Value, newValue interface{}) (err error) { - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("panic: %v", r) - } - }() - - // Attempt to set the value - value.Set(reflect.ValueOf(newValue)) - return nil -} From 01414baf959bbd825b2364d83b9ad8e9f602f052 Mon Sep 17 00:00:00 2001 From: Dirk Avery Date: Wed, 20 Dec 2023 13:26:39 -0500 Subject: [PATCH 368/438] autoflex/test: Not guaranteed order --- internal/framework/flex/auto_flatten_test.go | 9 --------- 1 file changed, 9 deletions(-) diff --git a/internal/framework/flex/auto_flatten_test.go b/internal/framework/flex/auto_flatten_test.go index deca4c23dad..d6439c14715 100644 --- a/internal/framework/flex/auto_flatten_test.go +++ 
b/internal/framework/flex/auto_flatten_test.go @@ -841,10 +841,6 @@ func TestFlattenGeneric(t *testing.T) { Attr1: "a", Attr2: "b", }, - string(TestEnumScalar): { - Attr1: "c", - Attr2: "d", - }, }, }, Target: &TestFlexMapBlockKeyTF04{}, @@ -855,11 +851,6 @@ func TestFlattenGeneric(t *testing.T) { Attr1: types.StringValue("a"), Attr2: types.StringValue("b"), }, - { - MapBlockKey: fwtypes.StringEnumValue(TestEnumScalar), - Attr1: types.StringValue("c"), - Attr2: types.StringValue("d"), - }, }), }, }, From a980edfc26bea0b1161d3568518566d41b01bf5e Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 20 Dec 2023 11:06:18 -0800 Subject: [PATCH 369/438] Fixes doc URL --- docs/add-a-new-service.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/add-a-new-service.md b/docs/add-a-new-service.md index 6dd33d9c0ac..332790143c4 100644 --- a/docs/add-a-new-service.md +++ b/docs/add-a-new-service.md @@ -70,7 +70,7 @@ Once the service client has been added, implement the first [resource](./add-a-n If an AWS service must be created in a non-standard way, for example the service API's endpoint must be accessed via a single AWS Region, then: -1. Add an `x` in the **SkipClientGenerate** column for the service in [`names/data/names_data.csv`](https://github.com/hashicorp/terraform-provider-aws/blob/main/names/data/README.md) +1. Add an `x` in the **SkipClientGenerate** column for the service in [`names/data/names_data.csv`](https://github.com/hashicorp/terraform-provider-aws/blob/main/names/README.md) 1. 
Run `make gen` From c95fffbb4f29a9408e223ccfb9e5bd648b83a83f Mon Sep 17 00:00:00 2001 From: changelogbot Date: Wed, 20 Dec 2023 19:14:19 +0000 Subject: [PATCH 370/438] Update CHANGELOG.md for #35016 --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2fca56ffd4b..8cc833d15aa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,12 +6,18 @@ FEATURES: ENHANCEMENTS: +* data-source/aws_batch_compute_environment: Add `update_policy` attribute ([#34353](https://github.com/hashicorp/terraform-provider-aws/issues/34353)) * data-source/aws_ecr_image: Add `image_uri` attribute ([#24526](https://github.com/hashicorp/terraform-provider-aws/issues/24526)) +* resource/aws_batch_compute_environment: Add `update_policy` parameter ([#34353](https://github.com/hashicorp/terraform-provider-aws/issues/34353)) +* resource/aws_dms_replication_task: Allow `cdc_start_time` to use [RFC3339](https://www.rfc-editor.org/rfc/rfc3339) formatted dates in addition to UNIX timestamps ([#31917](https://github.com/hashicorp/terraform-provider-aws/issues/31917)) +* resource/aws_dms_replication_task: Remove [ForceNew](https://developer.hashicorp.com/terraform/plugin/sdkv2/schemas/schema-behaviors#forcenew) from `replication_instance_arn`, allowing in-place migration between DMS instances ([#30721](https://github.com/hashicorp/terraform-provider-aws/issues/30721)) * resource/aws_s3_bucket: Modify server-side encryption configuration error handling, enabling support for NetApp StorageGRID ([#34890](https://github.com/hashicorp/terraform-provider-aws/issues/34890)) BUG FIXES: * data-source/aws_lb_target_group: Change `deregistration_delay` from `TypeInt` to `TypeString` ([#31436](https://github.com/hashicorp/terraform-provider-aws/issues/31436)) +* resource/aws_dms_replication_config: Prevent erroneous diffs on `replication_settings` ([#34356](https://github.com/hashicorp/terraform-provider-aws/issues/34356)) +* resource/aws_dms_replication_task: 
Prevent erroneous diffs on `replication_task_settings` ([#34356](https://github.com/hashicorp/terraform-provider-aws/issues/34356)) * resource/aws_dynamodb_table: Fix error when waiting for snapshot to be created ([#34848](https://github.com/hashicorp/terraform-provider-aws/issues/34848)) * resource/aws_lb_target_group: Fix diff on `stickiness.cookie_name` when `stickiness.type` is `lb_cookie` ([#31436](https://github.com/hashicorp/terraform-provider-aws/issues/31436)) * resource/aws_memorydb_cluster: Treat `snapshotting` status as pending when creating cluster ([#31077](https://github.com/hashicorp/terraform-provider-aws/issues/31077)) From ccdfc7b105133a196f63e02c187fe144a270d40a Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 20 Dec 2023 15:45:04 -0800 Subject: [PATCH 371/438] Linting fixes --- names/data/read.go | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/names/data/read.go b/names/data/read.go index b702ca71656..c49be7c5402 100644 --- a/names/data/read.go +++ b/names/data/read.go @@ -7,6 +7,7 @@ import ( "bytes" _ "embed" "encoding/csv" + "errors" "io" "strings" ) @@ -144,15 +145,18 @@ func ReadAllServiceData() (results []ServiceRecord, err error) { // reader.ReuseRecord = true // Skip the header - reader.Read() + _, err = reader.Read() + if err != nil { + return + } for { r, err := reader.Read() - if err == io.EOF { + if errors.Is(err, io.EOF) { break } if err != nil { - return nil, nil + return nil, err } results = append(results, ServiceRecord(r)) } From 3b87b53562bd6fc84e637c1d03a2912704c0f9d6 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 20 Dec 2023 17:27:30 -0800 Subject: [PATCH 372/438] Fixes bug where changes to `rotation_rules.automatically_after_days` was ignored if `rotation_rules.schedule_expression` was set --- .../service/secretsmanager/secret_rotation.go | 18 +++---- .../secretsmanager/secret_rotation_test.go | 54 +++++++++++++++++++ 2 files changed, 62 insertions(+), 10 deletions(-) diff 
--git a/internal/service/secretsmanager/secret_rotation.go b/internal/service/secretsmanager/secret_rotation.go index 15ba3a5ae4c..b6be0ce716d 100644 --- a/internal/service/secretsmanager/secret_rotation.go +++ b/internal/service/secretsmanager/secret_rotation.go @@ -12,6 +12,7 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/service/secretsmanager" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -53,12 +54,7 @@ func ResourceSecretRotation() *schema.Resource { Optional: true, ConflictsWith: []string{"rotation_rules.0.schedule_expression"}, ExactlyOneOf: []string{"rotation_rules.0.automatically_after_days", "rotation_rules.0.schedule_expression"}, - DiffSuppressFunc: func(k, old, new string, d *schema.ResourceData) bool { - _, exists := d.GetOk("rotation_rules.0.schedule_expression") - return exists - }, - DiffSuppressOnRefresh: true, - ValidateFunc: validation.IntBetween(1, 1000), + ValidateFunc: validation.IntBetween(1, 1000), }, "duration": { Type: schema.TypeString, @@ -90,8 +86,9 @@ func resourceSecretRotationCreate(ctx context.Context, d *schema.ResourceData, m secretID := d.Get("secret_id").(string) input := &secretsmanager.RotateSecretInput{ - RotationRules: expandRotationRules(d.Get("rotation_rules").([]interface{})), - SecretId: aws.String(secretID), + ClientRequestToken: aws.String(id.UniqueId()), // Needed because we're handling our own retries + RotationRules: expandRotationRules(d.Get("rotation_rules").([]interface{})), + SecretId: aws.String(secretID), } if v, ok := d.GetOk("rotation_lambda_arn"); ok { @@ -154,8 +151,9 @@ func resourceSecretRotationUpdate(ctx context.Context, d *schema.ResourceData, m if d.HasChanges("rotation_lambda_arn", "rotation_rules") { input := 
&secretsmanager.RotateSecretInput{ - RotationRules: expandRotationRules(d.Get("rotation_rules").([]interface{})), - SecretId: aws.String(secretID), + ClientRequestToken: aws.String(id.UniqueId()), // Needed because we're handling our own retries + RotationRules: expandRotationRules(d.Get("rotation_rules").([]interface{})), + SecretId: aws.String(secretID), } if v, ok := d.GetOk("rotation_lambda_arn"); ok { diff --git a/internal/service/secretsmanager/secret_rotation_test.go b/internal/service/secretsmanager/secret_rotation_test.go index b13328f2413..fc1d29c1f93 100644 --- a/internal/service/secretsmanager/secret_rotation_test.go +++ b/internal/service/secretsmanager/secret_rotation_test.go @@ -41,6 +41,8 @@ func TestAccSecretsManagerSecretRotation_basic(t *testing.T) { resource.TestCheckResourceAttrPair(resourceName, "rotation_lambda_arn", lambdaFunctionResourceName, "arn"), resource.TestCheckResourceAttr(resourceName, "rotation_rules.#", "1"), resource.TestCheckResourceAttr(resourceName, "rotation_rules.0.automatically_after_days", strconv.Itoa(days)), + resource.TestCheckResourceAttr(resourceName, "rotation_rules.0.duration", ""), + resource.TestCheckResourceAttr(resourceName, "rotation_rules.0.schedule_expression", ""), ), }, { @@ -95,6 +97,53 @@ func TestAccSecretsManagerSecretRotation_scheduleExpression(t *testing.T) { }) } +func TestAccSecretsManagerSecretRotation_scheduleExpressionToDays(t *testing.T) { + ctx := acctest.Context(t) + var secret secretsmanager.DescribeSecretOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + const ( + resourceName = "aws_secretsmanager_secret_rotation.test" + lambdaFunctionResourceName = "aws_lambda_function.test" + scheduleExpression = "rate(10 days)" + days = 7 + ) + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, secretsmanager.EndpointsID), + ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckSecretRotationDestroy(ctx), + Steps: []resource.TestStep{ + { + Config: testAccSecretRotationConfig_scheduleExpression(rName, scheduleExpression), + Check: resource.ComposeTestCheckFunc( + testAccCheckSecretRotationExists(ctx, resourceName, &secret), + resource.TestCheckResourceAttr(resourceName, "rotation_enabled", "true"), + resource.TestCheckResourceAttrPair(resourceName, "rotation_lambda_arn", lambdaFunctionResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rotation_rules.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rotation_rules.0.schedule_expression", scheduleExpression), + resource.TestCheckResourceAttr(resourceName, "rotation_rules.0.automatically_after_days", "0"), + ), + }, + { + Config: testAccSecretRotationConfig_basic(rName, days), + Check: resource.ComposeTestCheckFunc( + testAccCheckSecretRotationExists(ctx, resourceName, &secret), + resource.TestCheckResourceAttr(resourceName, "rotation_enabled", "true"), + resource.TestCheckResourceAttrPair(resourceName, "rotation_lambda_arn", lambdaFunctionResourceName, "arn"), + resource.TestCheckResourceAttr(resourceName, "rotation_rules.#", "1"), + resource.TestCheckResourceAttr(resourceName, "rotation_rules.0.automatically_after_days", strconv.Itoa(days)), + resource.TestCheckResourceAttr(resourceName, "rotation_rules.0.schedule_expression", ""), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + func TestAccSecretsManagerSecretRotation_scheduleExpressionHours(t *testing.T) { ctx := acctest.Context(t) var secret secretsmanager.DescribeSecretOutput @@ -295,6 +344,11 @@ resource "aws_secretsmanager_secret" "test" { name = %[1]q } +resource "aws_secretsmanager_secret_version" "test" { + secret_id = aws_secretsmanager_secret.test.id + secret_string = "test-string" +} + resource "aws_secretsmanager_secret_rotation" "test" { secret_id = 
aws_secretsmanager_secret.test.id rotation_lambda_arn = aws_lambda_function.test.arn From 13ba5b3dc48052bf51b7f949079a35b89d244103 Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 20 Dec 2023 17:35:22 -0800 Subject: [PATCH 373/438] Sets `ClientRequestToken` for idempotent operations --- internal/service/secretsmanager/secret.go | 7 +++++-- internal/service/secretsmanager/secret_version.go | 4 +++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/internal/service/secretsmanager/secret.go b/internal/service/secretsmanager/secret.go index d2fa147abbf..97e0da3fac8 100644 --- a/internal/service/secretsmanager/secret.go +++ b/internal/service/secretsmanager/secret.go @@ -15,6 +15,7 @@ import ( "github.com/aws/aws-sdk-go/service/secretsmanager" "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/structure" @@ -142,6 +143,7 @@ func resourceSecretCreate(ctx context.Context, d *schema.ResourceData, meta inte secretName := create.Name(d.Get("name").(string), d.Get("name_prefix").(string)) input := &secretsmanager.CreateSecretInput{ + ClientRequestToken: aws.String(id.UniqueId()), // Needed because we're handling our own retries Description: aws.String(d.Get("description").(string)), ForceOverwriteReplicaSecret: aws.Bool(d.Get("force_overwrite_replica_secret").(bool)), Name: aws.String(secretName), @@ -313,8 +315,9 @@ func resourceSecretUpdate(ctx context.Context, d *schema.ResourceData, meta inte if d.HasChanges("description", "kms_key_id") { input := &secretsmanager.UpdateSecretInput{ - Description: aws.String(d.Get("description").(string)), - SecretId: aws.String(d.Id()), + ClientRequestToken: aws.String(id.UniqueId()), // Needed because we're handling our own 
retries + Description: aws.String(d.Get("description").(string)), + SecretId: aws.String(d.Id()), } if v, ok := d.GetOk("kms_key_id"); ok { diff --git a/internal/service/secretsmanager/secret_version.go b/internal/service/secretsmanager/secret_version.go index e088430315b..57694b94950 100644 --- a/internal/service/secretsmanager/secret_version.go +++ b/internal/service/secretsmanager/secret_version.go @@ -14,6 +14,7 @@ import ( "github.com/aws/aws-sdk-go/service/secretsmanager" "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/retry" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-provider-aws/internal/conns" @@ -78,7 +79,8 @@ func resourceSecretVersionCreate(ctx context.Context, d *schema.ResourceData, me secretID := d.Get("secret_id").(string) input := &secretsmanager.PutSecretValueInput{ - SecretId: aws.String(secretID), + ClientRequestToken: aws.String(id.UniqueId()), // Needed because we're handling our own retries + SecretId: aws.String(secretID), } if v, ok := d.GetOk("secret_string"); ok { From 92c991e5404ecd5b0c73eca66728c900170babdc Mon Sep 17 00:00:00 2001 From: Graham Davison Date: Wed, 20 Dec 2023 18:06:22 -0800 Subject: [PATCH 374/438] Adds CHANGELOG entry --- .changelog/35024.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/35024.txt diff --git a/.changelog/35024.txt b/.changelog/35024.txt new file mode 100644 index 00000000000..f4b6a94f92b --- /dev/null +++ b/.changelog/35024.txt @@ -0,0 +1,3 @@ +```release-note:bug +resource/aws_secretsmanager_secret_rotation: No longer ignores changes to `rotation_rules.automatically_after_days` when `rotation_rules.schedule_expression` is set. 
+``` From 217d2ce2b130751a8a4f93c63fdd3d38d4faefcf Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 21 Dec 2023 09:14:01 -0500 Subject: [PATCH 375/438] CDKTF documentation now generated elsewhere. --- .github/workflows/cdktf-documentation.yml | 38 ----------------------- 1 file changed, 38 deletions(-) delete mode 100644 .github/workflows/cdktf-documentation.yml diff --git a/.github/workflows/cdktf-documentation.yml b/.github/workflows/cdktf-documentation.yml deleted file mode 100644 index 287dd94a080..00000000000 --- a/.github/workflows/cdktf-documentation.yml +++ /dev/null @@ -1,38 +0,0 @@ -name: CDKTF Documentation -on: - schedule: - - cron: "0 0 * * WED" - workflow_dispatch: {} - -permissions: - contents: write - pull-requests: write - -jobs: - generateToken: - runs-on: ubuntu-latest - outputs: - token: ${{ steps.generate_token.outputs.token }} - steps: - - name: Generate Token - id: generate_token - uses: tibdex/github-app-token@3beb63f4bd073e61482598c45c71c1019b59b73a # v2.1.0 - with: - app_id: ${{ secrets.APP_ID }} - installation_retrieval_mode: id - installation_retrieval_payload: ${{ secrets.INSTALLATION_ID }} - private_key: ${{secrets.APP_PEM }} - - cdktfDocs: - needs: - - generateToken - uses: hashicorp/terraform-cdk/.github/workflows/registry-docs-pr-based.yml@304e2507209c9657135dc7b4b7ee68030327468f - secrets: - GH_PR_TOKEN: ${{ needs.generateToken.outputs.token }} - with: - providerFqn: "hashicorp/aws" - languages: "typescript,python" - files: "d/*.html.markdown,r/*.html.markdown" - parallelFileConversions: 1 - maxRunners: 20 - cdktfRegistryDocsVersion: "1.14.2" From d06245475724de8586d1abeef17cae4b806017cb Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 21 Dec 2023 09:21:56 -0500 Subject: [PATCH 376/438] Add 'names.CAWest1RegionID'. 
--- names/names.go | 1 + 1 file changed, 1 insertion(+) diff --git a/names/names.go b/names/names.go index 4877b40dc7b..d2f7cc323af 100644 --- a/names/names.go +++ b/names/names.go @@ -124,6 +124,7 @@ const ( APSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta). APSoutheast4RegionID = "ap-southeast-4" // Asia Pacific (Melbourne). CACentral1RegionID = "ca-central-1" // Canada (Central). + CAWest1RegionID = "ca-west-1" // Canada West (Calgary). EUCentral1RegionID = "eu-central-1" // Europe (Frankfurt). EUCentral2RegionID = "eu-central-2" // Europe (Zurich). EUNorth1RegionID = "eu-north-1" // Europe (Stockholm). From 8dc681f038e84b9ad1cfa5b46f66f4ff317123e4 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 21 Dec 2023 16:29:40 +0000 Subject: [PATCH 377/438] Update CHANGELOG.md for #34998 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8cc833d15aa..edd577dc579 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -19,6 +19,7 @@ BUG FIXES: * resource/aws_dms_replication_config: Prevent erroneous diffs on `replication_settings` ([#34356](https://github.com/hashicorp/terraform-provider-aws/issues/34356)) * resource/aws_dms_replication_task: Prevent erroneous diffs on `replication_task_settings` ([#34356](https://github.com/hashicorp/terraform-provider-aws/issues/34356)) * resource/aws_dynamodb_table: Fix error when waiting for snapshot to be created ([#34848](https://github.com/hashicorp/terraform-provider-aws/issues/34848)) +* resource/aws_finspace_kx_dataview: Properly set `arn` attribute on read, resolving persistent differences when `tags` are configured ([#34998](https://github.com/hashicorp/terraform-provider-aws/issues/34998)) * resource/aws_lb_target_group: Fix diff on `stickiness.cookie_name` when `stickiness.type` is `lb_cookie` ([#31436](https://github.com/hashicorp/terraform-provider-aws/issues/31436)) * resource/aws_memorydb_cluster: Treat `snapshotting` status as pending when creating cluster 
([#31077](https://github.com/hashicorp/terraform-provider-aws/issues/31077)) * resource/aws_ssoadmin_application: Fix `portal_options.sign_in_options.application_url` triggering `ValidationError` when unset ([#34967](https://github.com/hashicorp/terraform-provider-aws/issues/34967)) From 15fc0735152f66f162014f0e521276ac5ee49e53 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 21 Dec 2023 13:57:06 -0500 Subject: [PATCH 378/438] r/aws_s3_bucket: Tidy up Update. --- internal/service/s3/bucket.go | 1287 +++++++++-------- .../s3/bucket_object_lock_configuration.go | 18 +- .../s3/bucket_replication_configuration.go | 2 +- ...et_server_side_encryption_configuration.go | 19 +- 4 files changed, 676 insertions(+), 650 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 4f80e62e66e..5e6c0cb957d 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -36,6 +36,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/flex" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" + itypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" "golang.org/x/exp/slices" @@ -752,8 +753,7 @@ func resourceBucketCreate(ctx context.Context, d *schema.ResourceData, meta inte } // S3 Object Lock can only be enabled on bucket creation. 
- objectLockConfiguration := expandObjectLockConfiguration(d.Get("object_lock_configuration").([]interface{})) - if objectLockConfiguration != nil && objectLockConfiguration.ObjectLockEnabled == types.ObjectLockEnabledEnabled { + if v := expandBucketObjectLockConfiguration(d.Get("object_lock_configuration").([]interface{})); v != nil && v.ObjectLockEnabled == types.ObjectLockEnabledEnabled { input.ObjectLockEnabledForBucket = aws.Bool(true) } @@ -807,7 +807,6 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf // // Bucket Policy. // - // Read the policy if configured outside this resource e.g. with aws_s3_bucket_policy resource. policy, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() (string, error) { return findBucketPolicy(ctx, conn, d.Id()) @@ -839,7 +838,6 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf // // Bucket ACL. // - bucketACL, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() (*s3.GetBucketAclOutput, error) { return findBucketACL(ctx, conn, d.Id(), "") }) @@ -864,7 +862,6 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf // // Bucket CORS Configuration. // - corsRules, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() ([]types.CORSRule, error) { return findCORSRules(ctx, conn, d.Id(), "") }) @@ -889,7 +886,6 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf // // Bucket Website Configuration. // - bucketWebsite, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() (*s3.GetBucketWebsiteOutput, error) { return findBucketWebsite(ctx, conn, d.Id(), "") }) @@ -918,7 +914,6 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf // // Bucket Versioning. 
// - bucketVersioning, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() (*s3.GetBucketVersioningOutput, error) { return findBucketVersioning(ctx, conn, d.Id(), "") }) @@ -943,7 +938,6 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf // // Bucket Accelerate Configuration. // - bucketAccelerate, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() (*s3.GetBucketAccelerateConfigurationOutput, error) { return findBucketAccelerateConfiguration(ctx, conn, d.Id(), "") }) @@ -966,7 +960,6 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf // // Bucket Request Payment Configuration. // - bucketRequestPayment, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() (*s3.GetBucketRequestPaymentOutput, error) { return findBucketRequestPayment(ctx, conn, d.Id(), "") }) @@ -989,7 +982,6 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf // // Bucket Logging. // - loggingEnabled, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() (*types.LoggingEnabled, error) { return findLoggingEnabled(ctx, conn, d.Id(), "") }) @@ -1014,7 +1006,6 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf // // Bucket Lifecycle Configuration. // - lifecycleRules, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() ([]types.LifecycleRule, error) { return findLifecycleRules(ctx, conn, d.Id(), "") }) @@ -1039,7 +1030,6 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf // // Bucket Replication Configuration. 
// - replicationConfiguration, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() (*types.ReplicationConfiguration, error) { return findReplicationConfiguration(ctx, conn, d.Id()) }) @@ -1064,7 +1054,6 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf // // Bucket Server-side Encryption Configuration. // - encryptionConfiguration, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() (*types.ServerSideEncryptionConfiguration, error) { return findServerSideEncryptionConfiguration(ctx, conn, d.Id(), "") }) @@ -1089,7 +1078,6 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf // // Bucket Object Lock Configuration. // - objLockConfig, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() (*types.ObjectLockConfiguration, error) { return findObjectLockConfiguration(ctx, conn, d.Id(), "") }) @@ -1121,7 +1109,6 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf // // Bucket Region etc. // - region, err := manager.GetBucketRegion(ctx, conn, d.Id(), func(o *s3.Options) { o.UsePathStyle = meta.(*conns.AWSClient).S3UsePathStyle() }) @@ -1158,7 +1145,6 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf // // Bucket Tags. // - tags, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutRead), func() (tftags.KeyValueTags, error) { return BucketListTags(ctx, conn, d.Id()) }) @@ -1184,8 +1170,11 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta inte var diags diag.Diagnostics conn := meta.(*conns.AWSClient).S3Client(ctx) - // Note: Order of argument updates below is important + // Note: Order of argument updates below is important. + // + // Bucket Policy. 
+ // if d.HasChange("policy") { policy, err := structure.NormalizeJsonString(d.Get("policy").(string)) if err != nil { @@ -1218,8 +1207,11 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta inte } } + // + // Bucket CORS Configuration. + // if d.HasChange("cors_rule") { - if v, ok := d.GetOk("cors_rule"); ok && len(v.([]interface{})) == 0 { + if v, ok := d.GetOk("cors_rule"); !ok || len(v.([]interface{})) == 0 { _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { return conn.DeleteBucketCors(ctx, &s3.DeleteBucketCorsInput{ Bucket: aws.String(d.Id()), @@ -1247,8 +1239,11 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta inte } } + // + // Bucket Website Configuration. + // if d.HasChange("website") { - if v, ok := d.GetOk("website"); ok && len(v.([]interface{})) == 0 || v.([]interface{})[0] == nil { + if v, ok := d.GetOk("website"); !ok || len(v.([]interface{})) == 0 || v.([]interface{})[0] == nil { _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { return conn.DeleteBucketWebsite(ctx, &s3.DeleteBucketWebsiteInput{ Bucket: aws.String(d.Id()), @@ -1259,7 +1254,7 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta inte return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket (%s) website configuration: %s", d.Id(), err) } } else { - websiteConfig, err := expandBucketWebsiteConfiguration(v.([]interface{})[0].(map[string]interface{})) + websiteConfig, err := expandBucketWebsiteConfiguration(v.([]interface{})) if err != nil { return sdkdiag.AppendFromErr(diags, err) } @@ -1279,77 +1274,294 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta inte } } + // + // Bucket Versioning. 
+ // if d.HasChange("versioning") { v := d.Get("versioning").([]interface{}) + var versioningConfig *types.VersioningConfiguration if d.IsNewResource() { - if versioning := expandVersioningWhenIsNewResource(v); versioning != nil { - err := resourceBucketInternalVersioningUpdate(ctx, conn, d.Id(), versioning, d.Timeout(schema.TimeoutUpdate)) - if err != nil { - return sdkdiag.AppendErrorf(diags, "updating (new) S3 Bucket (%s) Versioning: %s", d.Id(), err) - } - } + versioningConfig = expandBucketVersioningConfigurationCreate(v) } else { - if err := resourceBucketInternalVersioningUpdate(ctx, conn, d.Id(), expandVersioning(v), d.Timeout(schema.TimeoutUpdate)); err != nil { - return sdkdiag.AppendErrorf(diags, "updating S3 Bucket (%s) Versioning: %s", d.Id(), err) - } + versioningConfig = expandBucketVersioningConfigurationUpdate(v) + } + + input := &s3.PutBucketVersioningInput{ + Bucket: aws.String(d.Id()), + VersioningConfiguration: versioningConfig, + } + + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + return conn.PutBucketVersioning(ctx, input) + }, errCodeNoSuchBucket) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "putting S3 Bucket (%s) versioning: %s", d.Id(), err) } } - if d.HasChange("acl") && !d.IsNewResource() { - if err := resourceBucketInternalACLUpdate(ctx, conn, d); err != nil { - return sdkdiag.AppendErrorf(diags, "updating S3 Bucket (%s) ACL: %s", d.Id(), err) + // + // Bucket ACL. + // + if (d.HasChange("acl") && !d.IsNewResource()) || (d.HasChange("grant") && d.Get("grant").(*schema.Set).Len() == 0) { + acl := types.BucketCannedACL(d.Get("acl").(string)) + if acl == "" { + // Use default value previously available in v3.x of the provider. 
+ acl = types.BucketCannedACLPrivate + } + input := &s3.PutBucketAclInput{ + ACL: acl, + Bucket: aws.String(d.Id()), + } + + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + return conn.PutBucketAcl(ctx, input) + }, errCodeNoSuchBucket) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "putting S3 Bucket (%s) ACL: %s", d.Id(), err) } } - if d.HasChange("grant") { - if err := resourceBucketInternalGrantsUpdate(ctx, conn, d); err != nil { - return sdkdiag.AppendErrorf(diags, "updating S3 Bucket (%s) Grants: %s", d.Id(), err) + if d.HasChange("grant") && d.Get("grant").(*schema.Set).Len() > 0 { + bucketACL, err := retryWhenNoSuchBucketError(ctx, d.Timeout(schema.TimeoutUpdate), func() (*s3.GetBucketAclOutput, error) { + return findBucketACL(ctx, conn, d.Id(), "") + }) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "reading S3 Bucket (%s) ACL: %s", d.Id(), err) + } + + input := &s3.PutBucketAclInput{ + AccessControlPolicy: &types.AccessControlPolicy{ + Grants: expandGrants(d.Get("grant").(*schema.Set).List()), + Owner: bucketACL.Owner, + }, + Bucket: aws.String(d.Id()), + } + + _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + return conn.PutBucketAcl(ctx, input) + }, errCodeNoSuchBucket) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "putting S3 Bucket (%s) ACL: %s", d.Id(), err) } } + // + // Bucket Logging. 
+ // if d.HasChange("logging") { - if err := resourceBucketInternalLoggingUpdate(ctx, conn, d); err != nil { - return sdkdiag.AppendErrorf(diags, "updating S3 Bucket (%s) Logging: %s", d.Id(), err) + input := &s3.PutBucketLoggingInput{ + Bucket: aws.String(d.Id()), + BucketLoggingStatus: &types.BucketLoggingStatus{}, + } + + if v, ok := d.GetOk("logging"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + tfMap := v.([]interface{})[0].(map[string]interface{}) + + input.BucketLoggingStatus.LoggingEnabled = &types.LoggingEnabled{} + + if v, ok := tfMap["target_bucket"].(string); ok && v != "" { + input.BucketLoggingStatus.LoggingEnabled.TargetBucket = aws.String(v) + } + + if v, ok := tfMap["target_prefix"].(string); ok && v != "" { + input.BucketLoggingStatus.LoggingEnabled.TargetPrefix = aws.String(v) + } + } + + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + return conn.PutBucketLogging(ctx, input) + }, errCodeNoSuchBucket) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "putting S3 Bucket (%s) logging: %s", d.Id(), err) } } + // + // Bucket Lifecycle Configuration. 
+ // if d.HasChange("lifecycle_rule") { - if err := resourceBucketInternalLifecycleUpdate(ctx, conn, d); err != nil { - return sdkdiag.AppendErrorf(diags, "updating S3 Bucket (%s) Lifecycle Rules: %s", d.Id(), err) + if v, ok := d.GetOk("lifecycle_rule"); !ok || len(v.([]interface{})) == 0 { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + return conn.DeleteBucketLifecycle(ctx, &s3.DeleteBucketLifecycleInput{ + Bucket: aws.String(d.Id()), + }) + }, errCodeNoSuchBucket) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket (%s) lifecycle configuration: %s", d.Id(), err) + } + } else { + input := &s3.PutBucketLifecycleConfigurationInput{ + Bucket: aws.String(d.Id()), + LifecycleConfiguration: &types.BucketLifecycleConfiguration{ + Rules: expandBucketLifecycleRules(ctx, v.([]interface{})), + }, + } + + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + return conn.PutBucketLifecycleConfiguration(ctx, input) + }, errCodeNoSuchBucket) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "putting S3 Bucket (%s) lifecycle configuration: %s", d.Id(), err) + } } } + // + // Bucket Accelerate Configuration. 
+ // if d.HasChange("acceleration_status") { - if err := resourceBucketInternalAccelerationUpdate(ctx, conn, d); err != nil { - return sdkdiag.AppendErrorf(diags, "updating S3 Bucket (%s) Acceleration Status: %s", d.Id(), err) + input := &s3.PutBucketAccelerateConfigurationInput{ + AccelerateConfiguration: &types.AccelerateConfiguration{ + Status: types.BucketAccelerateStatus(d.Get("acceleration_status").(string)), + }, + Bucket: aws.String(d.Id()), + } + + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + return conn.PutBucketAccelerateConfiguration(ctx, input) + }, errCodeNoSuchBucket) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "putting S3 Bucket (%s) accelerate configuration: %s", d.Id(), err) } } + // + // Bucket Request Payment Configuration. + // if d.HasChange("request_payer") { - if err := resourceBucketInternalRequestPayerUpdate(ctx, conn, d); err != nil { - return sdkdiag.AppendErrorf(diags, "updating S3 Bucket (%s) Request Payer: %s", d.Id(), err) + input := &s3.PutBucketRequestPaymentInput{ + Bucket: aws.String(d.Id()), + RequestPaymentConfiguration: &types.RequestPaymentConfiguration{ + Payer: types.Payer(d.Get("request_payer").(string)), + }, + } + + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + return conn.PutBucketRequestPayment(ctx, input) + }, errCodeNoSuchBucket) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "putting S3 Bucket (%s) request payment configuration: %s", d.Id(), err) } } + // + // Bucket Replication Configuration. 
+ // if d.HasChange("replication_configuration") { - if err := resourceBucketInternalReplicationConfigurationUpdate(ctx, conn, d); err != nil { - return sdkdiag.AppendErrorf(diags, "updating S3 Bucket (%s) Replication configuration: %s", d.Id(), err) + if v, ok := d.GetOk("replication_configuration"); !ok || len(v.([]interface{})) == 0 || v.([]interface{})[0] == nil { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + return conn.DeleteBucketReplication(ctx, &s3.DeleteBucketReplicationInput{ + Bucket: aws.String(d.Id()), + }) + }, errCodeNoSuchBucket) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket (%s) replication configuration: %s", d.Id(), err) + } + } else { + hasVersioning := false + + // Validate that bucket versioning is enabled. + if v, ok := d.GetOk("versioning"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { + tfMap := v.([]interface{})[0].(map[string]interface{}) + + if tfMap["enabled"].(bool) { + hasVersioning = true + } + } + + if !hasVersioning { + return sdkdiag.AppendErrorf(diags, "versioning must be enabled on S3 Bucket (%s) to allow replication", d.Id()) + } + + input := &s3.PutBucketReplicationInput{ + Bucket: aws.String(d.Id()), + ReplicationConfiguration: expandBucketReplicationConfiguration(ctx, v.([]interface{})), + } + + _, err := tfresource.RetryWhen(ctx, d.Timeout(schema.TimeoutUpdate), + func() (interface{}, error) { + return conn.PutBucketReplication(ctx, input) + }, + func(err error) (bool, error) { + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) || tfawserr.ErrMessageContains(err, errCodeInvalidRequest, "Versioning must be 'Enabled' on the bucket") { + return true, err + } + + return false, err + }, + ) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "putting S3 Bucket (%s) replication configuration: %s", d.Id(), err) + } } } + // + // Bucket Server-side Encryption Configuration. 
+ // if d.HasChange("server_side_encryption_configuration") { - if err := resourceBucketInternalServerSideEncryptionConfigurationUpdate(ctx, conn, d); err != nil { - return sdkdiag.AppendErrorf(diags, "updating S3 Bucket (%s) Server-side Encryption configuration: %s", d.Id(), err) + if v, ok := d.GetOk("replication_configuration"); !ok || len(v.([]interface{})) == 0 { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + return conn.DeleteBucketEncryption(ctx, &s3.DeleteBucketEncryptionInput{ + Bucket: aws.String(d.Id()), + }) + }, errCodeNoSuchBucket) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket (%s) server-side encryption configuration: %s", d.Id(), err) + } + } else { + input := &s3.PutBucketEncryptionInput{ + Bucket: aws.String(d.Id()), + ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{ + Rules: expandBucketServerSideEncryptionRules(v.([]interface{})), + }, + } + + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + return conn.PutBucketEncryption(ctx, input) + }, errCodeNoSuchBucket, errCodeOperationAborted) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "putting S3 Bucket (%s) server-side encryption configuration: %s", d.Id(), err) + } } } + // + // Bucket Object Lock Configuration. + // if d.HasChange("object_lock_configuration") { - if err := resourceBucketInternalObjectLockConfigurationUpdate(ctx, conn, d); err != nil { - return sdkdiag.AppendErrorf(diags, "updating S3 Bucket (%s) Object Lock configuration: %s", d.Id(), err) + // S3 Object Lock configuration cannot be deleted, only updated. 
+ input := &s3.PutObjectLockConfigurationInput{ + Bucket: aws.String(d.Id()), + ObjectLockConfiguration: expandBucketObjectLockConfiguration(d.Get("object_lock_configuration").([]interface{})), + } + + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + return conn.PutObjectLockConfiguration(ctx, input) + }, errCodeNoSuchBucket) + + if err != nil { + return sdkdiag.AppendErrorf(diags, "putting S3 Bucket (%s) object lock configuration: %s", d.Id(), err) } } + // + // Bucket Tags. + // if d.HasChange("tags_all") { o, n := d.GetChange("tags_all") @@ -1385,9 +1597,8 @@ func resourceBucketDelete(ctx context.Context, d *schema.ResourceData, meta inte // Delete everything including locked objects. // Don't ignore any object errors or we could recurse infinitely. var objectLockEnabled bool - objectLockConfiguration := expandObjectLockConfiguration(d.Get("object_lock_configuration").([]interface{})) - if objectLockConfiguration != nil { - objectLockEnabled = objectLockConfiguration.ObjectLockEnabled == types.ObjectLockEnabledEnabled + if v := expandBucketObjectLockConfiguration(d.Get("object_lock_configuration").([]interface{})); v != nil { + objectLockEnabled = v.ObjectLockEnabled == types.ObjectLockEnabledEnabled } if n, err := emptyBucket(ctx, conn, d.Id(), objectLockEnabled); err != nil { @@ -1440,487 +1651,56 @@ func retryWhenNoSuchBucketError[T any](ctx context.Context, timeout time.Duratio return f() }, errCodeNoSuchBucket) - if err != nil { - var zero T - return zero, err - } - - return outputRaw.(T), nil -} - -// https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region -func bucketRegionalDomainName(bucket, region string) string { - // Return a default AWS Commercial domain name if no Region is provided. 
- if region == "" { - return fmt.Sprintf("%s.s3.amazonaws.com", bucket) //lintignore:AWSR001 - } - return fmt.Sprintf("%s.s3.%s.%s", bucket, region, names.DNSSuffixForPartition(names.PartitionForRegion(region))) -} - -func bucketWebsiteEndpointAndDomain(bucket, region string) (string, string) { - var domain string - - // Default to us-east-1 if the bucket doesn't have a region: - // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html - if region == "" { - region = names.USEast1RegionID - } - - // Different regions have different syntax for website endpoints: - // https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html - // https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints - oldRegions := []string{ - names.APNortheast1RegionID, - names.APSoutheast1RegionID, - names.APSoutheast2RegionID, - names.EUWest1RegionID, - names.SAEast1RegionID, - names.USEast1RegionID, - names.USGovWest1RegionID, - names.USWest1RegionID, - names.USWest2RegionID, - } - if slices.Contains(oldRegions, region) { - domain = fmt.Sprintf("s3-website-%s.amazonaws.com", region) //lintignore:AWSR001 - } else { - dnsSuffix := names.DNSSuffixForPartition(names.PartitionForRegion(region)) - domain = fmt.Sprintf("s3-website.%s.%s", region, dnsSuffix) - } - - return fmt.Sprintf("%s.%s", bucket, domain), domain -} - -////////////////////////////////////////// Argument-Specific Update Functions ////////////////////////////////////////// - -func resourceBucketInternalAccelerationUpdate(ctx context.Context, conn *s3.S3, d *schema.ResourceData) error { - input := &s3.PutBucketAccelerateConfigurationInput{ - Bucket: aws.String(d.Id()), - AccelerateConfiguration: &s3.AccelerateConfiguration{ - Status: aws.String(d.Get("acceleration_status").(string)), - }, - } - - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { - return conn.PutBucketAccelerateConfigurationWithContext(ctx, 
input) - }, s3.ErrCodeNoSuchBucket) - - return err -} - -func resourceBucketInternalACLUpdate(ctx context.Context, conn *s3.S3, d *schema.ResourceData) error { - acl := d.Get("acl").(string) - if acl == "" { - // Use default value previously available in v3.x of the provider - acl = s3.BucketCannedACLPrivate - } - - input := &s3.PutBucketAclInput{ - Bucket: aws.String(d.Id()), - ACL: aws.String(acl), - } - - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { - return conn.PutBucketAclWithContext(ctx, input) - }, s3.ErrCodeNoSuchBucket) - - return err -} - -func resourceBucketInternalGrantsUpdate(ctx context.Context, conn *s3.S3, d *schema.ResourceData) error { - grants := d.Get("grant").(*schema.Set) - - if grants.Len() == 0 { - log.Printf("[DEBUG] S3 bucket: %s, Grants fallback to canned ACL", d.Id()) - - if err := resourceBucketInternalACLUpdate(ctx, conn, d); err != nil { - return fmt.Errorf("fallback to canned ACL, %s", err) - } - - return nil - } - - resp, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { - return conn.GetBucketAclWithContext(ctx, &s3.GetBucketAclInput{ - Bucket: aws.String(d.Id()), - }) - }, s3.ErrCodeNoSuchBucket) - - if err != nil { - return fmt.Errorf("getting S3 Bucket (%s) ACL: %s", d.Id(), err) - } - - output := resp.(*s3.GetBucketAclOutput) - - if output == nil { - return fmt.Errorf("getting S3 Bucket (%s) ACL: empty output", d.Id()) - } - - input := &s3.PutBucketAclInput{ - Bucket: aws.String(d.Id()), - AccessControlPolicy: &s3.AccessControlPolicy{ - Grants: expandGrants(grants.List()), - Owner: output.Owner, - }, - } - - _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { - return conn.PutBucketAclWithContext(ctx, input) - }, s3.ErrCodeNoSuchBucket) - - return err -} - -func resourceBucketInternalLifecycleUpdate(ctx context.Context, conn *s3.S3, d 
*schema.ResourceData) error { - lifecycleRules := d.Get("lifecycle_rule").([]interface{}) - - if len(lifecycleRules) == 0 || lifecycleRules[0] == nil { - input := &s3.DeleteBucketLifecycleInput{ - Bucket: aws.String(d.Id()), - } - - _, err := conn.DeleteBucketLifecycleWithContext(ctx, input) - - if err != nil { - return fmt.Errorf("removing S3 Bucket (%s) lifecycle: %w", d.Id(), err) - } - - return nil - } - - rules := make([]*s3.LifecycleRule, 0, len(lifecycleRules)) - - for i, lifecycleRule := range lifecycleRules { - r := lifecycleRule.(map[string]interface{}) - - rule := &s3.LifecycleRule{} - - // Filter - tags := Tags(tftags.New(ctx, r["tags"]).IgnoreAWS()) - filter := &s3.LifecycleRuleFilter{} - if len(tags) > 0 { - lifecycleRuleAndOp := &s3.LifecycleRuleAndOperator{} - lifecycleRuleAndOp.SetPrefix(r["prefix"].(string)) - lifecycleRuleAndOp.SetTags(tags) - filter.SetAnd(lifecycleRuleAndOp) - } else { - filter.SetPrefix(r["prefix"].(string)) - } - rule.SetFilter(filter) - - // ID - if val, ok := r["id"].(string); ok && val != "" { - rule.ID = aws.String(val) - } else { - rule.ID = aws.String(id.PrefixedUniqueId("tf-s3-lifecycle-")) - } - - // Enabled - if val, ok := r["enabled"].(bool); ok && val { - rule.Status = aws.String(s3.ExpirationStatusEnabled) - } else { - rule.Status = aws.String(s3.ExpirationStatusDisabled) - } - - // AbortIncompleteMultipartUpload - if val, ok := r["abort_incomplete_multipart_upload_days"].(int); ok && val > 0 { - rule.AbortIncompleteMultipartUpload = &s3.AbortIncompleteMultipartUpload{ - DaysAfterInitiation: aws.Int64(int64(val)), - } - } - - // Expiration - expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.expiration", i)).([]interface{}) - if len(expiration) > 0 && expiration[0] != nil { - e := expiration[0].(map[string]interface{}) - i := &s3.LifecycleExpiration{} - if val, ok := e["date"].(string); ok && val != "" { - t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val)) - if err != nil { - return 
fmt.Errorf("parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error()) - } - i.Date = aws.Time(t) - } else if val, ok := e["days"].(int); ok && val > 0 { - i.Days = aws.Int64(int64(val)) - } else if val, ok := e["expired_object_delete_marker"].(bool); ok { - i.ExpiredObjectDeleteMarker = aws.Bool(val) - } - rule.Expiration = i - } - - // NoncurrentVersionExpiration - nc_expiration := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_expiration", i)).([]interface{}) - if len(nc_expiration) > 0 && nc_expiration[0] != nil { - e := nc_expiration[0].(map[string]interface{}) - - if val, ok := e["days"].(int); ok && val > 0 { - rule.NoncurrentVersionExpiration = &s3.NoncurrentVersionExpiration{ - NoncurrentDays: aws.Int64(int64(val)), - } - } - } - - // Transitions - transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.transition", i)).(*schema.Set).List() - if len(transitions) > 0 { - rule.Transitions = make([]*s3.Transition, 0, len(transitions)) - for _, transition := range transitions { - transition := transition.(map[string]interface{}) - i := &s3.Transition{} - if val, ok := transition["date"].(string); ok && val != "" { - t, err := time.Parse(time.RFC3339, fmt.Sprintf("%sT00:00:00Z", val)) - if err != nil { - return fmt.Errorf("parsing AWS S3 Bucket Lifecycle Expiration Date: %s", err.Error()) - } - i.Date = aws.Time(t) - } else if val, ok := transition["days"].(int); ok && val >= 0 { - i.Days = aws.Int64(int64(val)) - } - if val, ok := transition["storage_class"].(string); ok && val != "" { - i.StorageClass = aws.String(val) - } - - rule.Transitions = append(rule.Transitions, i) - } - } - // NoncurrentVersionTransitions - nc_transitions := d.Get(fmt.Sprintf("lifecycle_rule.%d.noncurrent_version_transition", i)).(*schema.Set).List() - if len(nc_transitions) > 0 { - rule.NoncurrentVersionTransitions = make([]*s3.NoncurrentVersionTransition, 0, len(nc_transitions)) - for _, transition := range nc_transitions { - transition := 
transition.(map[string]interface{}) - i := &s3.NoncurrentVersionTransition{} - if val, ok := transition["days"].(int); ok && val >= 0 { - i.NoncurrentDays = aws.Int64(int64(val)) - } - if val, ok := transition["storage_class"].(string); ok && val != "" { - i.StorageClass = aws.String(val) - } - - rule.NoncurrentVersionTransitions = append(rule.NoncurrentVersionTransitions, i) - } - } - - // As a lifecycle rule requires 1 or more transition/expiration actions, - // we explicitly pass a default ExpiredObjectDeleteMarker value to be able to create - // the rule while keeping the policy unaffected if the conditions are not met. - if rule.Expiration == nil && rule.NoncurrentVersionExpiration == nil && - rule.Transitions == nil && rule.NoncurrentVersionTransitions == nil && - rule.AbortIncompleteMultipartUpload == nil { - rule.Expiration = &s3.LifecycleExpiration{ExpiredObjectDeleteMarker: aws.Bool(false)} - } - - rules = append(rules, rule) - } - - input := &s3.PutBucketLifecycleConfigurationInput{ - Bucket: aws.String(d.Id()), - LifecycleConfiguration: &s3.BucketLifecycleConfiguration{ - Rules: rules, - }, - } - - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { - return conn.PutBucketLifecycleConfigurationWithContext(ctx, input) - }, s3.ErrCodeNoSuchBucket) - - return err -} - -func resourceBucketInternalLoggingUpdate(ctx context.Context, conn *s3.S3, d *schema.ResourceData) error { - logging := d.Get("logging").([]interface{}) - loggingStatus := &s3.BucketLoggingStatus{} - - if len(logging) > 0 && logging[0] != nil { - c := logging[0].(map[string]interface{}) - - loggingEnabled := &s3.LoggingEnabled{} - if val, ok := c["target_bucket"].(string); ok { - loggingEnabled.TargetBucket = aws.String(val) - } - if val, ok := c["target_prefix"].(string); ok { - loggingEnabled.TargetPrefix = aws.String(val) - } - - loggingStatus.LoggingEnabled = loggingEnabled - } - - input := &s3.PutBucketLoggingInput{ - 
Bucket: aws.String(d.Id()), - BucketLoggingStatus: loggingStatus, - } - - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { - return conn.PutBucketLoggingWithContext(ctx, input) - }, s3.ErrCodeNoSuchBucket) - - return err -} - -func resourceBucketInternalObjectLockConfigurationUpdate(ctx context.Context, conn *s3.S3, d *schema.ResourceData) error { - // S3 Object Lock configuration cannot be deleted, only updated. - req := &s3.PutObjectLockConfigurationInput{ - Bucket: aws.String(d.Id()), - ObjectLockConfiguration: expandObjectLockConfiguration(d.Get("object_lock_configuration").([]interface{})), - } - - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { - return conn.PutObjectLockConfigurationWithContext(ctx, req) - }, s3.ErrCodeNoSuchBucket) - - return err -} - -func resourceBucketInternalReplicationConfigurationUpdate(ctx context.Context, conn *s3.S3, d *schema.ResourceData) error { - replicationConfiguration := d.Get("replication_configuration").([]interface{}) - - if len(replicationConfiguration) == 0 { - input := &s3.DeleteBucketReplicationInput{ - Bucket: aws.String(d.Id()), - } - - _, err := conn.DeleteBucketReplicationWithContext(ctx, input) - - if err != nil { - return fmt.Errorf("removing S3 Bucket (%s) Replication: %w", d.Id(), err) - } - - return nil - } - - hasVersioning := false - // Validate that bucket versioning is enabled - if versioning, ok := d.GetOk("versioning"); ok { - v := versioning.([]interface{}) - - if v[0].(map[string]interface{})["enabled"].(bool) { - hasVersioning = true - } - } - - if !hasVersioning { - return fmt.Errorf("versioning must be enabled to allow S3 bucket replication") - } - - input := &s3.PutBucketReplicationInput{ - Bucket: aws.String(d.Id()), - ReplicationConfiguration: expandBucketReplicationConfiguration(ctx, replicationConfiguration), - } - - err := retry.RetryContext(ctx, 
d.Timeout(schema.TimeoutUpdate), func() *retry.RetryError { - _, err := conn.PutBucketReplicationWithContext(ctx, input) - if tfawserr.ErrCodeEquals(err, s3.ErrCodeNoSuchBucket) || tfawserr.ErrMessageContains(err, errCodeInvalidRequest, "Versioning must be 'Enabled' on the bucket") { - return retry.RetryableError(err) - } - if err != nil { - return retry.NonRetryableError(err) - } - return nil - }) - - if tfresource.TimedOut(err) { - _, err = conn.PutBucketReplicationWithContext(ctx, input) - } - - return err -} - -func resourceBucketInternalRequestPayerUpdate(ctx context.Context, conn *s3.S3, d *schema.ResourceData) error { - payer := d.Get("request_payer").(string) - - input := &s3.PutBucketRequestPaymentInput{ - Bucket: aws.String(d.Id()), - RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{ - Payer: aws.String(payer), - }, - } - - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { - return conn.PutBucketRequestPaymentWithContext(ctx, input) - }, s3.ErrCodeNoSuchBucket) - - return err -} - -func resourceBucketInternalServerSideEncryptionConfigurationUpdate(ctx context.Context, conn *s3.S3, d *schema.ResourceData) error { - serverSideEncryptionConfiguration := d.Get("server_side_encryption_configuration").([]interface{}) - - if len(serverSideEncryptionConfiguration) == 0 { - input := &s3.DeleteBucketEncryptionInput{ - Bucket: aws.String(d.Id()), - } - - _, err := conn.DeleteBucketEncryptionWithContext(ctx, input) - - if err != nil { - return fmt.Errorf("removing S3 Bucket (%s) Server-side Encryption: %w", d.Id(), err) - } - - return nil - } - - c := serverSideEncryptionConfiguration[0].(map[string]interface{}) - - rc := &s3.ServerSideEncryptionConfiguration{} - - rcRules := c["rule"].([]interface{}) - var rules []*s3.ServerSideEncryptionRule - for _, v := range rcRules { - rr := v.(map[string]interface{}) - rrDefault := rr["apply_server_side_encryption_by_default"].([]interface{}) - 
sseAlgorithm := rrDefault[0].(map[string]interface{})["sse_algorithm"].(string) - kmsMasterKeyId := rrDefault[0].(map[string]interface{})["kms_master_key_id"].(string) - rcDefaultRule := &s3.ServerSideEncryptionByDefault{ - SSEAlgorithm: aws.String(sseAlgorithm), - } - if kmsMasterKeyId != "" { - rcDefaultRule.KMSMasterKeyID = aws.String(kmsMasterKeyId) - } - rcRule := &s3.ServerSideEncryptionRule{ - ApplyServerSideEncryptionByDefault: rcDefaultRule, - } - - if val, ok := rr["bucket_key_enabled"].(bool); ok { - rcRule.BucketKeyEnabled = aws.Bool(val) - } - - rules = append(rules, rcRule) + if err != nil { + var zero T + return zero, err } - rc.Rules = rules + return outputRaw.(T), nil +} - input := &s3.PutBucketEncryptionInput{ - Bucket: aws.String(d.Id()), - ServerSideEncryptionConfiguration: rc, +// https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region +func bucketRegionalDomainName(bucket, region string) string { + // Return a default AWS Commercial domain name if no Region is provided. 
+ if region == "" { + return fmt.Sprintf("%s.s3.amazonaws.com", bucket) //lintignore:AWSR001 } - - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), - func() (interface{}, error) { - return conn.PutBucketEncryptionWithContext(ctx, input) - }, - s3.ErrCodeNoSuchBucket, - errCodeOperationAborted, - ) - - return err + return fmt.Sprintf("%s.s3.%s.%s", bucket, region, names.DNSSuffixForPartition(names.PartitionForRegion(region))) } -func resourceBucketInternalVersioningUpdate(ctx context.Context, conn *s3.S3, bucket string, versioningConfig *s3.VersioningConfiguration, timeout time.Duration) error { - input := &s3.PutBucketVersioningInput{ - Bucket: aws.String(bucket), - VersioningConfiguration: versioningConfig, +func bucketWebsiteEndpointAndDomain(bucket, region string) (string, string) { + var domain string + + // Default to us-east-1 if the bucket doesn't have a region: + // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html + if region == "" { + region = names.USEast1RegionID } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, timeout, func() (interface{}, error) { - return conn.PutBucketVersioningWithContext(ctx, input) - }, s3.ErrCodeNoSuchBucket) + // Different regions have different syntax for website endpoints: + // https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteEndpoints.html + // https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints + oldRegions := []string{ + names.APNortheast1RegionID, + names.APSoutheast1RegionID, + names.APSoutheast2RegionID, + names.EUWest1RegionID, + names.SAEast1RegionID, + names.USEast1RegionID, + names.USGovWest1RegionID, + names.USWest1RegionID, + names.USWest2RegionID, + } + if slices.Contains(oldRegions, region) { + domain = fmt.Sprintf("s3-website-%s.amazonaws.com", region) //lintignore:AWSR001 + } else { + dnsSuffix := names.DNSSuffixForPartition(names.PartitionForRegion(region)) + domain = fmt.Sprintf("s3-website.%s.%s", 
region, dnsSuffix) + } - return err + return fmt.Sprintf("%s.%s", bucket, domain), domain } -///////////////////////////////////////////// Expand and Flatten functions ///////////////////////////////////////////// - -// Cors Rule functions - func expandBucketCORSRules(l []interface{}) []types.CORSRule { if len(l) == 0 { return nil @@ -2076,6 +1856,187 @@ func flattenGrants(ap *s3.GetBucketAclOutput) []interface{} { // Lifecycle Rule functions +func expandBucketLifecycleRules(ctx context.Context, l []interface{}) []types.LifecycleRule { + if len(l) == 0 || l[0] == nil { + return nil + } + + var results []types.LifecycleRule + + for _, tfMapRaw := range l { + tfMap, ok := tfMapRaw.(map[string]interface{}) + + if !ok { + continue + } + + result := types.LifecycleRule{} + + if v, ok := tfMap["abort_incomplete_multipart_upload"].(int); ok && v > 0 { + result.AbortIncompleteMultipartUpload = &types.AbortIncompleteMultipartUpload{ + DaysAfterInitiation: aws.Int32(int32(v)), + } + } + + if v, ok := tfMap["expiration"].([]interface{}); ok && len(v) > 0 { + result.Expiration = expandBucketLifecycleRuleExpiration(v) + } + + var filter types.LifecycleRuleFilter + prefix := tfMap["prefix"].(string) + if tags := Tags(tftags.New(ctx, tfMap["tags"]).IgnoreAWS()); len(tags) > 0 { + filter = &types.LifecycleRuleFilterMemberAnd{ + Value: types.LifecycleRuleAndOperator{ + Prefix: aws.String(prefix), + Tags: tags, + }, + } + } else { + filter = &types.LifecycleRuleFilterMemberPrefix{ + Value: prefix, + } + } + result.Filter = filter + + if v, ok := tfMap["id"].(string); ok { + result.ID = aws.String(v) + } else { + result.ID = aws.String(id.PrefixedUniqueId("tf-s3-lifecycle-")) + } + + if v, ok := tfMap["noncurrent_version_expiration"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + result.NoncurrentVersionExpiration = expandBucketLifecycleRuleNoncurrentVersionExpiration(v[0].(map[string]interface{})) + } + + if v, ok := tfMap["noncurrent_version_transition"].(*schema.Set); ok && 
v.Len() > 0 { + result.NoncurrentVersionTransitions = expandBucketLifecycleRuleNoncurrentVersionTransitions(v.List()) + } + + if v, ok := tfMap["enabled"].(bool); ok && v { + result.Status = types.ExpirationStatusEnabled + } else { + result.Status = types.ExpirationStatusDisabled + } + + if v, ok := tfMap["transition"].(*schema.Set); ok && v.Len() > 0 { + result.Transitions = expandBucketLifecycleRuleTransitions(v.List()) + } + + // As a lifecycle rule requires 1 or more transition/expiration actions, + // we explicitly pass a default ExpiredObjectDeleteMarker value to be able to create + // the rule while keeping the policy unaffected if the conditions are not met. + if result.AbortIncompleteMultipartUpload == nil && result.Expiration == nil && result.NoncurrentVersionExpiration == nil && result.NoncurrentVersionTransitions == nil && result.Transitions == nil { + result.Expiration = &types.LifecycleExpiration{ExpiredObjectDeleteMarker: aws.Bool(false)} + } + + results = append(results, result) + } + + return results +} + +func expandBucketLifecycleRuleExpiration(l []interface{}) *types.LifecycleExpiration { + if len(l) == 0 { + return nil + } + + result := &types.LifecycleExpiration{} + + if l[0] == nil { + return result + } + + m := l[0].(map[string]interface{}) + + if v, ok := m["date"].(string); ok && v != "" { + t, _ := time.Parse(time.RFC3339, v+"T00:00:00Z") + result.Date = aws.Time(t) + } else if v, ok := m["days"].(int); ok && v > 0 { + result.Days = aws.Int32(int32(v)) + } else if v, ok := m["expired_object_delete_marker"].(bool); ok { + result.ExpiredObjectDeleteMarker = aws.Bool(v) + } + + return result +} + +func expandBucketLifecycleRuleNoncurrentVersionExpiration(m map[string]interface{}) *types.NoncurrentVersionExpiration { + if len(m) == 0 { + return nil + } + + var result *types.NoncurrentVersionExpiration + + if v, ok := m["days"].(int); ok { + result = &types.NoncurrentVersionExpiration{ + NoncurrentDays: aws.Int32(int32(v)), + } + } + + return 
result +} + +func expandBucketLifecycleRuleNoncurrentVersionTransitions(l []interface{}) []types.NoncurrentVersionTransition { + if len(l) == 0 || l[0] == nil { + return nil + } + + var results []types.NoncurrentVersionTransition + + for _, tfMapRaw := range l { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { + continue + } + + transition := types.NoncurrentVersionTransition{} + + if v, ok := tfMap["days"].(int); ok { + transition.NoncurrentDays = aws.Int32(int32(v)) + } + + if v, ok := tfMap["storage_class"].(string); ok && v != "" { + transition.StorageClass = types.TransitionStorageClass(v) + } + + results = append(results, transition) + } + + return results +} + +func expandBucketLifecycleRuleTransitions(l []interface{}) []types.Transition { + if len(l) == 0 || l[0] == nil { + return nil + } + + var results []types.Transition + + for _, tfMapRaw := range l { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { + continue + } + + transition := types.Transition{} + + if v, ok := tfMap["date"].(string); ok && v != "" { + t, _ := time.Parse(time.RFC3339, v+"T00:00:00Z") + transition.Date = aws.Time(t) + } else if v, ok := tfMap["days"].(int); ok && v >= 0 { + transition.Days = aws.Int32(int32(v)) + } + + if v, ok := tfMap["storage_class"].(string); ok && v != "" { + transition.StorageClass = types.TransitionStorageClass(v) + } + + results = append(results, transition) + } + + return results +} + func flattenBucketLifecycleRuleExpiration(expiration *s3.LifecycleExpiration) []interface{} { if expiration == nil { return []interface{}{} @@ -2258,44 +2219,95 @@ func flattenBucketLoggingEnabled(loggingEnabled *s3.LoggingEnabled) []interface{ return []interface{}{m} } +func expandBucketServerSideEncryptionRules(l []interface{}) []types.ServerSideEncryptionRule { + var rules []types.ServerSideEncryptionRule + + for _, tfMapRaw := range l { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { + continue + } + + rule := 
types.ServerSideEncryptionRule{} + + if v, ok := tfMap["apply_server_side_encryption_by_default"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rule.ApplyServerSideEncryptionByDefault = expandBucketServerSideEncryptionByDefault(v) + } + + if v, ok := tfMap["bucket_key_enabled"].(bool); ok { + rule.BucketKeyEnabled = aws.Bool(v) + } + + rules = append(rules, rule) + } + + return rules +} + +func expandBucketServerSideEncryptionByDefault(l []interface{}) *types.ServerSideEncryptionByDefault { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + if !ok { + return nil + } + + sse := &types.ServerSideEncryptionByDefault{} + + if v, ok := tfMap["kms_master_key_id"].(string); ok && v != "" { + sse.KMSMasterKeyID = aws.String(v) + } + + if v, ok := tfMap["sse_algorithm"].(string); ok && v != "" { + sse.SSEAlgorithm = types.ServerSideEncryption(v) + } + + return sse +} + // Object Lock Configuration functions -func expandObjectLockConfiguration(vConf []interface{}) *types.ObjectLockConfiguration { - if len(vConf) == 0 || vConf[0] == nil { +func expandBucketObjectLockConfiguration(l []interface{}) *types.ObjectLockConfiguration { + if len(l) == 0 || l[0] == nil { return nil } - mConf := vConf[0].(map[string]interface{}) + tfMap, ok := l[0].(map[string]interface{}) + if !ok { + return nil + } - conf := &types.ObjectLockConfiguration{} + apiObject := &types.ObjectLockConfiguration{} - if vObjectLockEnabled, ok := mConf["object_lock_enabled"].(string); ok && vObjectLockEnabled != "" { - conf.ObjectLockEnabled = types.ObjectLockEnabled(vObjectLockEnabled) + if v, ok := tfMap["object_lock_enabled"].(string); ok && v != "" { + apiObject.ObjectLockEnabled = types.ObjectLockEnabled(v) } - if vRule, ok := mConf["rule"].([]interface{}); ok && len(vRule) > 0 { - mRule := vRule[0].(map[string]interface{}) + if v, ok := tfMap["rule"].([]interface{}); ok && len(v) > 0 { + tfMap := v[0].(map[string]interface{}) - if vDefaultRetention, 
ok := mRule["default_retention"].([]interface{}); ok && len(vDefaultRetention) > 0 && vDefaultRetention[0] != nil { - mDefaultRetention := vDefaultRetention[0].(map[string]interface{}) + if v, ok := tfMap["default_retention"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + tfMap := v[0].(map[string]interface{}) - conf.Rule = &types.ObjectLockRule{ + apiObject.Rule = &types.ObjectLockRule{ DefaultRetention: &types.DefaultRetention{}, } - if vMode, ok := mDefaultRetention["mode"].(string); ok && vMode != "" { - conf.Rule.DefaultRetention.Mode = types.ObjectLockRetentionMode(vMode) + if v, ok := tfMap["days"].(int); ok && v > 0 { + apiObject.Rule.DefaultRetention.Days = aws.Int32(int32(v)) } - if vDays, ok := mDefaultRetention["days"].(int); ok && vDays > 0 { - conf.Rule.DefaultRetention.Days = aws.Int32(int32(vDays)) + if v, ok := tfMap["mode"].(string); ok && v != "" { + apiObject.Rule.DefaultRetention.Mode = types.ObjectLockRetentionMode(v) } - if vYears, ok := mDefaultRetention["years"].(int); ok && vYears > 0 { - conf.Rule.DefaultRetention.Years = aws.Int32(int32(vYears)) + if v, ok := tfMap["years"].(int); ok && v > 0 { + apiObject.Rule.DefaultRetention.Years = aws.Int32(int32(v)) } } } - return conf + return apiObject } func flattenObjectLockConfiguration(conf *s3.ObjectLockConfiguration) []interface{} { @@ -2324,9 +2336,7 @@ func flattenObjectLockConfiguration(conf *s3.ObjectLockConfiguration) []interfac return []interface{}{mConf} } -// Replication Configuration functions - -func expandBucketReplicationConfiguration(ctx context.Context, l []interface{}) *s3.ReplicationConfiguration { +func expandBucketReplicationConfiguration(ctx context.Context, l []interface{}) *types.ReplicationConfiguration { if len(l) == 0 || l[0] == nil { return nil } @@ -2336,21 +2346,21 @@ func expandBucketReplicationConfiguration(ctx context.Context, l []interface{}) return nil } - rc := &s3.ReplicationConfiguration{} + apiObject := &types.ReplicationConfiguration{} - if val, ok 
:= tfMap["role"].(string); ok { - rc.Role = aws.String(val) + if v, ok := tfMap["role"].(string); ok { + apiObject.Role = aws.String(v) } if v, ok := tfMap["rules"].(*schema.Set); ok && v.Len() > 0 { - rc.Rules = expandBucketReplicationConfigurationRules(ctx, v.List()) + apiObject.Rules = expandBucketReplicationConfigurationRules(ctx, v.List()) } - return rc + return apiObject } -func expandBucketReplicationConfigurationRules(ctx context.Context, l []interface{}) []*s3.ReplicationRule { - var rules []*s3.ReplicationRule +func expandBucketReplicationConfigurationRules(ctx context.Context, l []interface{}) []types.ReplicationRule { + var rules []types.ReplicationRule for _, tfMapRaw := range l { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -2358,67 +2368,74 @@ func expandBucketReplicationConfigurationRules(ctx context.Context, l []interfac continue } - rcRule := &s3.ReplicationRule{} + rule := types.ReplicationRule{} - if status, ok := tfMap["status"].(string); ok && status != "" { - rcRule.Status = aws.String(status) + if v, ok := tfMap["status"].(string); ok && v != "" { + rule.Status = types.ReplicationRuleStatus(v) } else { continue } + if v, ok := tfMap["destination"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rule.Destination = expandBucketReplicationConfigurationRulesDestination(v) + } else { + rule.Destination = &types.Destination{} + } + if v, ok := tfMap["id"].(string); ok && v != "" { - rcRule.ID = aws.String(v) + rule.ID = aws.String(v) } - if v, ok := tfMap["destination"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rcRule.Destination = expandBucketReplicationConfigurationRulesDestination(v) - } else { - rcRule.Destination = &s3.Destination{} + if v, ok := tfMap["source_selection_criteria"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rule.SourceSelectionCriteria = expandBucketReplicationConfigurationRulesSourceSelectionCriteria(v) } if v, ok := tfMap["source_selection_criteria"].([]interface{}); ok && len(v) > 0 && v[0] != 
nil { - rcRule.SourceSelectionCriteria = expandBucketReplicationConfigurationRulesSourceSelectionCriteria(v) + rule.SourceSelectionCriteria = expandBucketReplicationConfigurationRulesSourceSelectionCriteria(v) } if v, ok := tfMap["filter"].([]interface{}); ok && len(v) > 0 && v[0] != nil { // XML schema V2. - rcRule.Priority = aws.Int64(int64(tfMap["priority"].(int))) - - rcRule.Filter = &s3.ReplicationRuleFilter{} - - filter := v[0].(map[string]interface{}) - tags := Tags(tftags.New(ctx, filter["tags"]).IgnoreAWS()) - - if len(tags) > 0 { - rcRule.Filter.And = &s3.ReplicationRuleAndOperator{ - Prefix: aws.String(filter["prefix"].(string)), - Tags: tags, + tfMap := v[0].(map[string]interface{}) + var filter types.ReplicationRuleFilter + + if tags := Tags(tftags.New(ctx, tfMap["tags"]).IgnoreAWS()); len(tags) > 0 { + filter = &types.ReplicationRuleFilterMemberAnd{ + Value: types.ReplicationRuleAndOperator{ + Prefix: aws.String(tfMap["prefix"].(string)), + Tags: tags, + }, } } else { - rcRule.Filter.Prefix = aws.String(filter["prefix"].(string)) + filter = &types.ReplicationRuleFilterMemberPrefix{ + Value: tfMap["prefix"].(string), + } } - if dmr, ok := tfMap["delete_marker_replication_status"].(string); ok && dmr != "" { - rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ - Status: aws.String(dmr), + rule.Filter = filter + rule.Priority = aws.Int32(int32(tfMap["priority"].(int))) + + if v, ok := tfMap["delete_marker_replication_status"].(string); ok && v != "" { + rule.DeleteMarkerReplication = &types.DeleteMarkerReplication{ + Status: types.DeleteMarkerReplicationStatus(v), } } else { - rcRule.DeleteMarkerReplication = &s3.DeleteMarkerReplication{ - Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled), + rule.DeleteMarkerReplication = &types.DeleteMarkerReplication{ + Status: types.DeleteMarkerReplicationStatusDisabled, } } } else { // XML schema V1. 
- rcRule.Prefix = aws.String(tfMap["prefix"].(string)) + rule.Prefix = aws.String(tfMap["prefix"].(string)) } - rules = append(rules, rcRule) + rules = append(rules, rule) } return rules } -func expandBucketReplicationConfigurationRulesDestination(l []interface{}) *s3.Destination { +func expandBucketReplicationConfigurationRulesDestination(l []interface{}) *types.Destination { if len(l) == 0 || l[0] == nil { return nil } @@ -2428,31 +2445,31 @@ func expandBucketReplicationConfigurationRulesDestination(l []interface{}) *s3.D return nil } - ruleDestination := &s3.Destination{} + apiObject := &types.Destination{} if v, ok := tfMap["bucket"].(string); ok { - ruleDestination.Bucket = aws.String(v) + apiObject.Bucket = aws.String(v) } if v, ok := tfMap["storage_class"].(string); ok && v != "" { - ruleDestination.StorageClass = aws.String(v) + apiObject.StorageClass = types.StorageClass(v) } if v, ok := tfMap["replica_kms_key_id"].(string); ok && v != "" { - ruleDestination.EncryptionConfiguration = &s3.EncryptionConfiguration{ + apiObject.EncryptionConfiguration = &types.EncryptionConfiguration{ ReplicaKmsKeyID: aws.String(v), } } if v, ok := tfMap["account_id"].(string); ok && v != "" { - ruleDestination.Account = aws.String(v) + apiObject.Account = aws.String(v) } if v, ok := tfMap["access_control_translation"].([]interface{}); ok && len(v) > 0 && v[0] != nil { aclTranslationValues := v[0].(map[string]interface{}) ruleAclTranslation := &s3.AccessControlTranslation{} ruleAclTranslation.Owner = aws.String(aclTranslationValues["owner"].(string)) - ruleDestination.AccessControlTranslation = ruleAclTranslation + apiObject.AccessControlTranslation = ruleAclTranslation } // replication metrics (required for RTC) @@ -2462,7 +2479,7 @@ func expandBucketReplicationConfigurationRulesDestination(l []interface{}) *s3.D metricsConfig.EventThreshold = &s3.ReplicationTimeValue{} metricsConfig.Status = aws.String(metricsValues["status"].(string)) metricsConfig.EventThreshold.Minutes = 
aws.Int64(int64(metricsValues["minutes"].(int))) - ruleDestination.Metrics = metricsConfig + apiObject.Metrics = metricsConfig } // replication time control (RTC) @@ -2472,13 +2489,13 @@ func expandBucketReplicationConfigurationRulesDestination(l []interface{}) *s3.D rtcConfig.Status = aws.String(rtcValues["status"].(string)) rtcConfig.Time = &s3.ReplicationTimeValue{} rtcConfig.Time.Minutes = aws.Int64(int64(rtcValues["minutes"].(int))) - ruleDestination.ReplicationTime = rtcConfig + apiObject.ReplicationTime = rtcConfig } - return ruleDestination + return apiObject } -func expandBucketReplicationConfigurationRulesSourceSelectionCriteria(l []interface{}) *s3.SourceSelectionCriteria { +func expandBucketReplicationConfigurationRulesSourceSelectionCriteria(l []interface{}) *types.SourceSelectionCriteria { if len(l) == 0 || l[0] == nil { return nil } @@ -2488,21 +2505,22 @@ func expandBucketReplicationConfigurationRulesSourceSelectionCriteria(l []interf return nil } - ruleSsc := &s3.SourceSelectionCriteria{} + result := &types.SourceSelectionCriteria{} if v, ok := tfMap["sse_kms_encrypted_objects"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - sseKmsValues := v[0].(map[string]interface{}) - sseKmsEncryptedObjects := &s3.SseKmsEncryptedObjects{} + tfMap := v[0].(map[string]interface{}) + apiObject := &types.SseKmsEncryptedObjects{} - if sseKmsValues["enabled"].(bool) { - sseKmsEncryptedObjects.Status = aws.String(s3.SseKmsEncryptedObjectsStatusEnabled) + if tfMap["enabled"].(bool) { + apiObject.Status = types.SseKmsEncryptedObjectsStatusEnabled } else { - sseKmsEncryptedObjects.Status = aws.String(s3.SseKmsEncryptedObjectsStatusDisabled) + apiObject.Status = types.SseKmsEncryptedObjectsStatusDisabled } - ruleSsc.SseKmsEncryptedObjects = sseKmsEncryptedObjects + + result.SseKmsEncryptedObjects = apiObject } - return ruleSsc + return result } func flattenBucketReplicationConfiguration(ctx context.Context, r *s3.ReplicationConfiguration) []interface{} { @@ -2719,69 
+2737,67 @@ func flattenServerSideEncryptionConfigurationRules(rules []*s3.ServerSideEncrypt // Versioning functions -func expandVersioning(l []interface{}) *s3.VersioningConfiguration { +func expandBucketVersioningConfigurationCreate(l []interface{}) *types.VersioningConfiguration { if len(l) == 0 || l[0] == nil { return nil } tfMap, ok := l[0].(map[string]interface{}) - if !ok { return nil } - output := &s3.VersioningConfiguration{} + apiObject := &types.VersioningConfiguration{} - if v, ok := tfMap["enabled"].(bool); ok { - if v { - output.Status = aws.String(s3.BucketVersioningStatusEnabled) - } else { - output.Status = aws.String(s3.BucketVersioningStatusSuspended) - } + // Only set and return a non-nil VersioningConfiguration with at least one of + // MFADelete or Status enabled as the PutBucketVersioning API request + // does not need to be made for new buckets that don't require versioning. + // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/4494. + + if v, ok := tfMap["enabled"].(bool); ok && v { + apiObject.Status = types.BucketVersioningStatusEnabled } - if v, ok := tfMap["mfa_delete"].(bool); ok { - if v { - output.MFADelete = aws.String(s3.MFADeleteEnabled) - } else { - output.MFADelete = aws.String(s3.MFADeleteDisabled) - } + if v, ok := tfMap["mfa_delete"].(bool); ok && v { + apiObject.MFADelete = types.MFADeleteEnabled } - return output + if itypes.IsZero(&apiObject) { + return nil + } + + return apiObject } -func expandVersioningWhenIsNewResource(l []interface{}) *s3.VersioningConfiguration { +func expandBucketVersioningConfigurationUpdate(l []interface{}) *types.VersioningConfiguration { if len(l) == 0 || l[0] == nil { return nil } tfMap, ok := l[0].(map[string]interface{}) - if !ok { return nil } - output := &s3.VersioningConfiguration{} + apiObject := &types.VersioningConfiguration{} - // Only set and return a non-nil VersioningConfiguration with at least one of - // MFADelete or Status enabled as the PutBucketVersioning 
API request - // does not need to be made for new buckets that don't require versioning. - // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/4494 - - if v, ok := tfMap["enabled"].(bool); ok && v { - output.Status = aws.String(s3.BucketVersioningStatusEnabled) - } - - if v, ok := tfMap["mfa_delete"].(bool); ok && v { - output.MFADelete = aws.String(s3.MFADeleteEnabled) + if v, ok := tfMap["enabled"].(bool); ok { + if v { + apiObject.Status = types.BucketVersioningStatusEnabled + } else { + apiObject.Status = types.BucketVersioningStatusSuspended + } } - if output.MFADelete == nil && output.Status == nil { - return nil + if v, ok := tfMap["mfa_delete"].(bool); ok { + if v { + apiObject.MFADelete = types.MFADeleteEnabled + } else { + apiObject.MFADelete = types.MFADeleteDisabled + } } - return output + return apiObject } func flattenVersioning(versioning *s3.GetBucketVersioningOutput) []interface{} { @@ -2808,7 +2824,16 @@ func flattenVersioning(versioning *s3.GetBucketVersioningOutput) []interface{} { // Website functions -func expandBucketWebsiteConfiguration(tfMap map[string]interface{}) (*types.WebsiteConfiguration, error) { +func expandBucketWebsiteConfiguration(l []interface{}) (*types.WebsiteConfiguration, error) { + if len(l) == 0 || l[0] == nil { + return nil, nil + } + + tfMap, ok := l[0].(map[string]interface{}) + if !ok { + return nil, nil + } + websiteConfig := &types.WebsiteConfiguration{} if v, ok := tfMap["index_document"].(string); ok && v != "" { diff --git a/internal/service/s3/bucket_object_lock_configuration.go b/internal/service/s3/bucket_object_lock_configuration.go index cda36a61368..9ede730b67d 100644 --- a/internal/service/s3/bucket_object_lock_configuration.go +++ b/internal/service/s3/bucket_object_lock_configuration.go @@ -107,7 +107,7 @@ func resourceBucketObjectLockConfigurationCreate(ctx context.Context, d *schema. 
// ObjectLockEnabled is required by the API, even if configured directly on the S3 bucket // during creation, else a MalformedXML error will be returned. ObjectLockEnabled: types.ObjectLockEnabled(d.Get("object_lock_enabled").(string)), - Rule: expandBucketObjectLockConfigurationRule(d.Get("rule").([]interface{})), + Rule: expandObjectLockRule(d.Get("rule").([]interface{})), }, } if expectedBucketOwner != "" { @@ -170,7 +170,7 @@ func resourceBucketObjectLockConfigurationRead(ctx context.Context, d *schema.Re d.Set("bucket", bucket) d.Set("expected_bucket_owner", expectedBucketOwner) d.Set("object_lock_enabled", objLockConfig.ObjectLockEnabled) - if err := d.Set("rule", flattenBucketObjectLockConfigurationRule(objLockConfig.Rule)); err != nil { + if err := d.Set("rule", flattenObjectLockRule(objLockConfig.Rule)); err != nil { return diag.Errorf("setting rule: %s", err) } @@ -191,7 +191,7 @@ func resourceBucketObjectLockConfigurationUpdate(ctx context.Context, d *schema. // ObjectLockEnabled is required by the API, even if configured directly on the S3 bucket // during creation, else a MalformedXML error will be returned. 
ObjectLockEnabled: types.ObjectLockEnabled(d.Get("object_lock_enabled").(string)), - Rule: expandBucketObjectLockConfigurationRule(d.Get("rule").([]interface{})), + Rule: expandObjectLockRule(d.Get("rule").([]interface{})), }, } if expectedBucketOwner != "" { @@ -282,7 +282,7 @@ func findObjectLockConfiguration(ctx context.Context, conn *s3.Client, bucket, e return output.ObjectLockConfiguration, nil } -func expandBucketObjectLockConfigurationRule(l []interface{}) *types.ObjectLockRule { +func expandObjectLockRule(l []interface{}) *types.ObjectLockRule { if len(l) == 0 || l[0] == nil { return nil } @@ -295,13 +295,13 @@ func expandBucketObjectLockConfigurationRule(l []interface{}) *types.ObjectLockR rule := &types.ObjectLockRule{} if v, ok := tfMap["default_retention"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rule.DefaultRetention = expandBucketObjectLockConfigurationCorsRuleDefaultRetention(v) + rule.DefaultRetention = expandDefaultRetention(v) } return rule } -func expandBucketObjectLockConfigurationCorsRuleDefaultRetention(l []interface{}) *types.DefaultRetention { +func expandDefaultRetention(l []interface{}) *types.DefaultRetention { if len(l) == 0 || l[0] == nil { return nil } @@ -328,7 +328,7 @@ func expandBucketObjectLockConfigurationCorsRuleDefaultRetention(l []interface{} return dr } -func flattenBucketObjectLockConfigurationRule(rule *types.ObjectLockRule) []interface{} { +func flattenObjectLockRule(rule *types.ObjectLockRule) []interface{} { if rule == nil { return []interface{}{} } @@ -336,13 +336,13 @@ func flattenBucketObjectLockConfigurationRule(rule *types.ObjectLockRule) []inte m := make(map[string]interface{}) if rule.DefaultRetention != nil { - m["default_retention"] = flattenBucketObjectLockConfigurationRuleDefaultRetention(rule.DefaultRetention) + m["default_retention"] = flattenDefaultRetention(rule.DefaultRetention) } return []interface{}{m} } -func flattenBucketObjectLockConfigurationRuleDefaultRetention(dr 
*types.DefaultRetention) []interface{} { +func flattenDefaultRetention(dr *types.DefaultRetention) []interface{} { if dr == nil { return []interface{}{} } diff --git a/internal/service/s3/bucket_replication_configuration.go b/internal/service/s3/bucket_replication_configuration.go index 24dd0564961..be71164bfb7 100644 --- a/internal/service/s3/bucket_replication_configuration.go +++ b/internal/service/s3/bucket_replication_configuration.go @@ -478,6 +478,7 @@ func expandReplicationRules(ctx context.Context, l []interface{}) []types.Replic if !ok { continue } + rule := types.ReplicationRule{} if v, ok := tfMap["delete_marker_replication"].([]interface{}); ok && len(v) > 0 && v[0] != nil { @@ -720,7 +721,6 @@ func expandReplicationRuleSourceSelectionCriteria(l []interface{}) *types.Source } tfMap, ok := l[0].(map[string]interface{}) - if !ok { return nil } diff --git a/internal/service/s3/bucket_server_side_encryption_configuration.go b/internal/service/s3/bucket_server_side_encryption_configuration.go index fe99e984580..e6aef383959 100644 --- a/internal/service/s3/bucket_server_side_encryption_configuration.go +++ b/internal/service/s3/bucket_server_side_encryption_configuration.go @@ -88,7 +88,7 @@ func resourceBucketServerSideEncryptionConfigurationCreate(ctx context.Context, input := &s3.PutBucketEncryptionInput{ Bucket: aws.String(bucket), ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{ - Rules: expandBucketServerSideEncryptionConfigurationRules(d.Get("rule").(*schema.Set).List()), + Rules: expandServerSideEncryptionRules(d.Get("rule").(*schema.Set).List()), }, } if expectedBucketOwner != "" { @@ -142,7 +142,7 @@ func resourceBucketServerSideEncryptionConfigurationRead(ctx context.Context, d d.Set("bucket", bucket) d.Set("expected_bucket_owner", expectedBucketOwner) - if err := d.Set("rule", flattenBucketServerSideEncryptionConfigurationRules(sse.Rules)); err != nil { + if err := d.Set("rule", 
flattenServerSideEncryptionRules(sse.Rules)); err != nil { return diag.Errorf("setting rule: %s", err) } @@ -160,7 +160,7 @@ func resourceBucketServerSideEncryptionConfigurationUpdate(ctx context.Context, input := &s3.PutBucketEncryptionInput{ Bucket: aws.String(bucket), ServerSideEncryptionConfiguration: &types.ServerSideEncryptionConfiguration{ - Rules: expandBucketServerSideEncryptionConfigurationRules(d.Get("rule").(*schema.Set).List()), + Rules: expandServerSideEncryptionRules(d.Get("rule").(*schema.Set).List()), }, } if expectedBucketOwner != "" { @@ -236,7 +236,7 @@ func findServerSideEncryptionConfiguration(ctx context.Context, conn *s3.Client, return output.ServerSideEncryptionConfiguration, nil } -func expandBucketServerSideEncryptionConfigurationRuleApplyServerSideEncryptionByDefault(l []interface{}) *types.ServerSideEncryptionByDefault { +func expandServerSideEncryptionByDefault(l []interface{}) *types.ServerSideEncryptionByDefault { if len(l) == 0 || l[0] == nil { return nil } @@ -259,7 +259,7 @@ func expandBucketServerSideEncryptionConfigurationRuleApplyServerSideEncryptionB return sse } -func expandBucketServerSideEncryptionConfigurationRules(l []interface{}) []types.ServerSideEncryptionRule { +func expandServerSideEncryptionRules(l []interface{}) []types.ServerSideEncryptionRule { var rules []types.ServerSideEncryptionRule for _, tfMapRaw := range l { @@ -271,19 +271,20 @@ func expandBucketServerSideEncryptionConfigurationRules(l []interface{}) []types rule := types.ServerSideEncryptionRule{} if v, ok := tfMap["apply_server_side_encryption_by_default"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rule.ApplyServerSideEncryptionByDefault = expandBucketServerSideEncryptionConfigurationRuleApplyServerSideEncryptionByDefault(v) + rule.ApplyServerSideEncryptionByDefault = expandServerSideEncryptionByDefault(v) } if v, ok := tfMap["bucket_key_enabled"].(bool); ok { rule.BucketKeyEnabled = aws.Bool(v) } + rules = append(rules, rule) } return rules } 
-func flattenBucketServerSideEncryptionConfigurationRules(rules []types.ServerSideEncryptionRule) []interface{} { +func flattenServerSideEncryptionRules(rules []types.ServerSideEncryptionRule) []interface{} { var results []interface{} for _, rule := range rules { @@ -292,7 +293,7 @@ func flattenBucketServerSideEncryptionConfigurationRules(rules []types.ServerSid } if rule.ApplyServerSideEncryptionByDefault != nil { - m["apply_server_side_encryption_by_default"] = flattenBucketServerSideEncryptionConfigurationRuleApplyServerSideEncryptionByDefault(rule.ApplyServerSideEncryptionByDefault) + m["apply_server_side_encryption_by_default"] = flattenServerSideEncryptionByDefault(rule.ApplyServerSideEncryptionByDefault) } results = append(results, m) @@ -301,7 +302,7 @@ func flattenBucketServerSideEncryptionConfigurationRules(rules []types.ServerSid return results } -func flattenBucketServerSideEncryptionConfigurationRuleApplyServerSideEncryptionByDefault(sse *types.ServerSideEncryptionByDefault) []interface{} { +func flattenServerSideEncryptionByDefault(sse *types.ServerSideEncryptionByDefault) []interface{} { if sse == nil { return nil } From acbd7a0ba789f0e9dce2ac4564bbe7c207838c06 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 21 Dec 2023 14:01:20 -0500 Subject: [PATCH 379/438] r/aws_s3_bucket: Tidy up Create further. 
--- internal/service/s3/bucket.go | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 5e6c0cb957d..019bf0051ab 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -713,6 +713,10 @@ func resourceBucketCreate(ctx context.Context, d *schema.ResourceData, meta inte bucket := create.Name(d.Get("bucket").(string), d.Get("bucket_prefix").(string)) region := meta.(*conns.AWSClient).Region + if err := validBucketName(bucket, region); err != nil { + return sdkdiag.AppendErrorf(diags, "validating S3 Bucket (%s) name: %s", bucket, err) + } + // Special case: us-east-1 does not return error if the bucket already exists and is owned by // current account. It also resets the Bucket ACLs. if region == names.USEast1RegionID { @@ -735,18 +739,13 @@ func resourceBucketCreate(ctx context.Context, d *schema.ResourceData, meta inte input.ACL = types.BucketCannedACLPrivate } - // Special case us-east-1 region and do not set the LocationConstraint. - // See "Request Elements: http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUT.html + // See https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html#AmazonS3-CreateBucket-request-LocationConstraint. if region != names.USEast1RegionID { input.CreateBucketConfiguration = &types.CreateBucketConfiguration{ LocationConstraint: types.BucketLocationConstraint(region), } } - if err := validBucketName(bucket, region); err != nil { - return sdkdiag.AppendErrorf(diags, "validating S3 Bucket (%s) name: %s", bucket, err) - } - // S3 Object Lock is not supported on all partitions. 
if v, ok := d.GetOk("object_lock_enabled"); ok { input.ObjectLockEnabledForBucket = aws.Bool(v.(bool)) From 9e149726e119b2c8cde94b2f3f5de225b1344b50 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Thu, 21 Dec 2023 19:08:50 +0000 Subject: [PATCH 380/438] Update CHANGELOG.md for #35024 --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index edd577dc579..94066b450d6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -22,6 +22,7 @@ BUG FIXES: * resource/aws_finspace_kx_dataview: Properly set `arn` attribute on read, resolving persistent differences when `tags` are configured ([#34998](https://github.com/hashicorp/terraform-provider-aws/issues/34998)) * resource/aws_lb_target_group: Fix diff on `stickiness.cookie_name` when `stickiness.type` is `lb_cookie` ([#31436](https://github.com/hashicorp/terraform-provider-aws/issues/31436)) * resource/aws_memorydb_cluster: Treat `snapshotting` status as pending when creating cluster ([#31077](https://github.com/hashicorp/terraform-provider-aws/issues/31077)) +* resource/aws_secretsmanager_secret_rotation: No longer ignores changes to `rotation_rules.automatically_after_days` when `rotation_rules.schedule_expression` is set. ([#35024](https://github.com/hashicorp/terraform-provider-aws/issues/35024)) * resource/aws_ssoadmin_application: Fix `portal_options.sign_in_options.application_url` triggering `ValidationError` when unset ([#34967](https://github.com/hashicorp/terraform-provider-aws/issues/34967)) ## 5.31.0 (December 15, 2023) From f5e97686e04bd9dfca444f31d784400b79641b07 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 21 Dec 2023 16:08:35 -0500 Subject: [PATCH 381/438] Add 'tfslices.Of'. 
--- internal/slices/slices.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/internal/slices/slices.go b/internal/slices/slices.go index ea6a8b0d973..85a00f99007 100644 --- a/internal/slices/slices.go +++ b/internal/slices/slices.go @@ -5,6 +5,11 @@ package slices import "golang.org/x/exp/slices" +// Of returns a slice from varargs. +func Of[E any](vs ...E) []E { + return vs +} + // Reverse returns a reversed copy of the slice `s`. func Reverse[S ~[]E, E any](s S) S { n := len(s) From 6fd7f2b780f65dd2fd9e9c10489bc8e9ee81a07f Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 21 Dec 2023 16:13:08 -0500 Subject: [PATCH 382/438] enum: Use 'tfslices'. --- internal/enum/values.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/internal/enum/values.go b/internal/enum/values.go index e0a02c98868..5bc4adab581 100644 --- a/internal/enum/values.go +++ b/internal/enum/values.go @@ -3,6 +3,10 @@ package enum +import ( + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" +) + type Valueser[T ~string] interface { ~string Values() []T @@ -17,10 +21,7 @@ func Values[T Valueser[T]]() []string { } func Slice[T Valueser[T]](l ...T) []string { - result := make([]string, len(l)) - for i, v := range l { - result[i] = string(v) - } - - return result + return tfslices.ApplyToAll(l, func(v T) string { + return string(v) + }) } From 7a907250962848f17b9c9f400010459988e8beea Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 21 Dec 2023 16:37:20 -0500 Subject: [PATCH 383/438] r/aws_s3_bucket: Tidy up flex. 
--- internal/service/s3/bucket.go | 1173 ++++++++--------- internal/service/s3/bucket_acl.go | 98 +- .../service/s3/bucket_cors_configuration.go | 56 +- .../s3/bucket_lifecycle_configuration.go | 53 +- internal/service/s3/bucket_logging.go | 18 +- .../s3/bucket_replication_configuration.go | 99 +- ...et_server_side_encryption_configuration.go | 8 +- internal/service/s3/bucket_versioning.go | 4 +- .../s3/bucket_website_configuration.go | 174 +-- 9 files changed, 823 insertions(+), 860 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 019bf0051ab..8e2c09e7c2c 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -34,6 +34,7 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs" "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" "github.com/hashicorp/terraform-provider-aws/internal/flex" + tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" itypes "github.com/hashicorp/terraform-provider-aws/internal/types" @@ -849,7 +850,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf switch { case err == nil: - if err := d.Set("grant", flattenGrants(bucketACL)); err != nil { + if err := d.Set("grant", flattenBucketGrants(bucketACL)); err != nil { return sdkdiag.AppendErrorf(diags, "setting grant: %s", err) } case tfawserr.ErrCodeEquals(err, errCodeNotImplemented, errCodeXNotImplemented): @@ -925,7 +926,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf switch { case err == nil: - if err := d.Set("versioning", flattenVersioning(bucketVersioning)); err != nil { + if err := d.Set("versioning", flattenBucketVersioning(bucketVersioning)); err != nil { return sdkdiag.AppendErrorf(diags, "setting versioning: %s", err) } case tfawserr.ErrCodeEquals(err, 
errCodeNotImplemented, errCodeXNotImplemented): @@ -1065,7 +1066,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf switch { case err == nil: - if err := d.Set("server_side_encryption_configuration", flattenServerSideEncryptionConfiguration(encryptionConfiguration)); err != nil { + if err := d.Set("server_side_encryption_configuration", flattenBucketServerSideEncryptionConfiguration(encryptionConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting server_side_encryption_configuration: %s", err) } case tfawserr.ErrCodeEquals(err, errCodeReplicationConfigurationNotFound, errCodeNotImplemented, errCodeXNotImplemented): @@ -1334,7 +1335,7 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta inte input := &s3.PutBucketAclInput{ AccessControlPolicy: &types.AccessControlPolicy{ - Grants: expandGrants(d.Get("grant").(*schema.Set).List()), + Grants: expandBucketGrants(d.Get("grant").(*schema.Set).List()), Owner: bucketACL.Owner, }, Bucket: aws.String(d.Id()), @@ -1700,6 +1701,10 @@ func bucketWebsiteEndpointAndDomain(bucket, region string) (string, string) { return fmt.Sprintf("%s.%s", bucket, domain), domain } +// +// Bucket CORS Configuration. +// + func expandBucketCORSRules(l []interface{}) []types.CORSRule { if len(l) == 0 { return nil @@ -1771,10 +1776,215 @@ func flattenBucketCORSRules(rules []types.CORSRule) []interface{} { return results } -// Grants functions +// +// Bucket Website Configuration. 
+// + +func expandBucketWebsiteConfiguration(l []interface{}) (*types.WebsiteConfiguration, error) { + if len(l) == 0 || l[0] == nil { + return nil, nil + } + + tfMap, ok := l[0].(map[string]interface{}) + if !ok { + return nil, nil + } + + websiteConfig := &types.WebsiteConfiguration{} + + if v, ok := tfMap["index_document"].(string); ok && v != "" { + websiteConfig.IndexDocument = &types.IndexDocument{ + Suffix: aws.String(v), + } + } + + if v, ok := tfMap["error_document"].(string); ok && v != "" { + websiteConfig.ErrorDocument = &types.ErrorDocument{ + Key: aws.String(v), + } + } + + if v, ok := tfMap["redirect_all_requests_to"].(string); ok && v != "" { + redirect, err := url.Parse(v) + if err == nil && redirect.Scheme != "" { + var buf bytes.Buffer + + buf.WriteString(redirect.Host) + if redirect.Path != "" { + buf.WriteString(redirect.Path) + } + if redirect.RawQuery != "" { + buf.WriteString("?") + buf.WriteString(redirect.RawQuery) + } + websiteConfig.RedirectAllRequestsTo = &types.RedirectAllRequestsTo{ + HostName: aws.String(buf.String()), + Protocol: types.Protocol(redirect.Scheme), + } + } else { + websiteConfig.RedirectAllRequestsTo = &types.RedirectAllRequestsTo{ + HostName: aws.String(v), + } + } + } + + if v, ok := tfMap["routing_rules"].(string); ok && v != "" { + var routingRules []types.RoutingRule + if err := json.Unmarshal([]byte(v), &routingRules); err != nil { + return nil, err + } + websiteConfig.RoutingRules = routingRules + } + + return websiteConfig, nil +} + +func flattenBucketWebsite(apiObject *s3.GetBucketWebsiteOutput) ([]interface{}, error) { + if apiObject == nil { + return []interface{}{}, nil + } + + m := make(map[string]interface{}) + + if v := apiObject.IndexDocument; v != nil { + m["index_document"] = aws.ToString(v.Suffix) + } + + if v := apiObject.ErrorDocument; v != nil { + m["error_document"] = aws.ToString(v.Key) + } + + if apiObject := apiObject.RedirectAllRequestsTo; apiObject != nil { + hostName := 
aws.ToString(apiObject.HostName) + + if apiObject.Protocol == "" { + m["redirect_all_requests_to"] = hostName + } else { + var host string + var path string + var query string + + parsedHostName, err := url.Parse(hostName) + if err == nil { + host = parsedHostName.Host + path = parsedHostName.Path + query = parsedHostName.RawQuery + } else { + host = hostName + } + + m["redirect_all_requests_to"] = (&url.URL{ + Scheme: string(apiObject.Protocol), + Host: host, + Path: path, + RawQuery: query, + }).String() + } + } + + if apiObject := apiObject.RoutingRules; apiObject != nil { + rr, err := normalizeRoutingRules(apiObject) + if err != nil { + return nil, err + } + m["routing_rules"] = rr + } + + // We have special handling for the website configuration, + // so only return the configuration if there is one. + if len(m) == 0 { + return []interface{}{}, nil + } + + return []interface{}{m}, nil +} + +// +// Bucket Versioning. +// + +func expandBucketVersioningConfigurationCreate(l []interface{}) *types.VersioningConfiguration { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + if !ok { + return nil + } + + apiObject := &types.VersioningConfiguration{} + + // Only set and return a non-nil VersioningConfiguration with at least one of + // MFADelete or Status enabled as the PutBucketVersioning API request + // does not need to be made for new buckets that don't require versioning. + // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/4494. 
+
+	if v, ok := tfMap["enabled"].(bool); ok && v {
+		apiObject.Status = types.BucketVersioningStatusEnabled
+	}
+
+	if v, ok := tfMap["mfa_delete"].(bool); ok && v {
+		apiObject.MFADelete = types.MFADeleteEnabled
+	}
+
+	if itypes.IsZero(apiObject) {
+		return nil
+	}
+
+	return apiObject
+}
+
+func expandBucketVersioningConfigurationUpdate(l []interface{}) *types.VersioningConfiguration {
+	if len(l) == 0 || l[0] == nil {
+		return nil
+	}
+
+	tfMap, ok := l[0].(map[string]interface{})
+	if !ok {
+		return nil
+	}
+
+	apiObject := &types.VersioningConfiguration{}
+
+	if v, ok := tfMap["enabled"].(bool); ok {
+		if v {
+			apiObject.Status = types.BucketVersioningStatusEnabled
+		} else {
+			apiObject.Status = types.BucketVersioningStatusSuspended
+		}
+	}
+
+	if v, ok := tfMap["mfa_delete"].(bool); ok {
+		if v {
+			apiObject.MFADelete = types.MFADeleteEnabled
+		} else {
+			apiObject.MFADelete = types.MFADeleteDisabled
+		}
+	}
+
+	return apiObject
+}
+
+func flattenBucketVersioning(config *s3.GetBucketVersioningOutput) []interface{} {
+	if config == nil {
+		return []interface{}{}
+	}
+
+	m := map[string]interface{}{
+		"enabled":    config.Status == types.BucketVersioningStatusEnabled,
+		"mfa_delete": config.MFADelete == types.MFADeleteStatusEnabled,
+	}
+
+	return []interface{}{m}
+}
+
+//
+// Bucket ACL.
+// -func expandGrants(l []interface{}) []*s3.Grant { - var grants []*s3.Grant +func expandBucketGrants(l []interface{}) []types.Grant { + var grants []types.Grant for _, tfMapRaw := range l { tfMap, ok := tfMapRaw.(map[string]interface{}) @@ -1789,71 +1999,101 @@ func expandGrants(l []interface{}) []*s3.Grant { continue } - grantee := &s3.Grantee{} + grantee := &types.Grantee{} if v, ok := tfMap["id"].(string); ok && v != "" { - grantee.SetID(v) + grantee.ID = aws.String(v) } if v, ok := tfMap["type"].(string); ok && v != "" { - grantee.SetType(v) + grantee.Type = types.Type(v) } if v, ok := tfMap["uri"].(string); ok && v != "" { - grantee.SetURI(v) + grantee.URI = aws.String(v) } - g := &s3.Grant{ + grant := types.Grant{ Grantee: grantee, - Permission: aws.String(permission), + Permission: types.Permission(permission), } - grants = append(grants, g) + grants = append(grants, grant) } } } + return grants } -func flattenGrants(ap *s3.GetBucketAclOutput) []interface{} { - if len(ap.Grants) == 0 { +func flattenBucketGrants(apiObject *s3.GetBucketAclOutput) []interface{} { + if len(apiObject.Grants) == 0 { return []interface{}{} } getGrant := func(grants []interface{}, grantee map[string]interface{}) (interface{}, bool) { - for _, pg := range grants { - pgt := pg.(map[string]interface{}) - if pgt["type"] == grantee["type"] && pgt["id"] == grantee["id"] && pgt["uri"] == grantee["uri"] && - pgt["permissions"].(*schema.Set).Len() > 0 { - return pg, true + for _, grant := range grants { + tfMap := grant.(map[string]interface{}) + if tfMap["type"] == grantee["type"] && tfMap["id"] == grantee["id"] && tfMap["uri"] == grantee["uri"] && tfMap["permissions"].(*schema.Set).Len() > 0 { + return grant, true } } return nil, false } - grants := make([]interface{}, 0, len(ap.Grants)) - for _, granteeObject := range ap.Grants { - grantee := make(map[string]interface{}) - grantee["type"] = aws.StringValue(granteeObject.Grantee.Type) + results := make([]interface{}, 0, 
len(apiObject.Grants))
+
+	for _, apiObject := range apiObject.Grants {
+		grantee := apiObject.Grantee
 
-		if granteeObject.Grantee.ID != nil {
-			grantee["id"] = aws.StringValue(granteeObject.Grantee.ID)
+		m := map[string]interface{}{
+			"type": grantee.Type,
+		}
+
+		if grantee.ID != nil {
+			m["id"] = aws.ToString(grantee.ID)
 		}
-		if granteeObject.Grantee.URI != nil {
-			grantee["uri"] = aws.StringValue(granteeObject.Grantee.URI)
+
+		if grantee.URI != nil {
+			m["uri"] = aws.ToString(grantee.URI)
 		}
-		if pg, ok := getGrant(grants, grantee); ok {
-			pg.(map[string]interface{})["permissions"].(*schema.Set).Add(aws.StringValue(granteeObject.Permission))
+
+		if v, ok := getGrant(results, m); ok {
+			v.(map[string]interface{})["permissions"].(*schema.Set).Add(string(apiObject.Permission))
 		} else {
-			grantee["permissions"] = schema.NewSet(schema.HashString, []interface{}{aws.StringValue(granteeObject.Permission)})
-			grants = append(grants, grantee)
+			m["permissions"] = schema.NewSet(schema.HashString, []interface{}{string(apiObject.Permission)})
+			results = append(results, m)
 		}
 	}
 
-	return grants
+	return results
+}
+
+//
+// Bucket Logging.
+//
+
+func flattenBucketLoggingEnabled(apiObject *types.LoggingEnabled) []interface{} {
+	if apiObject == nil {
+		return []interface{}{}
+	}
+
+	m := make(map[string]interface{})
+
+	if apiObject.TargetBucket != nil {
+		m["target_bucket"] = aws.ToString(apiObject.TargetBucket)
+	}
+
+	if apiObject.TargetPrefix != nil {
+		m["target_prefix"] = aws.ToString(apiObject.TargetPrefix)
+	}
+
+	return []interface{}{m}
 }
 
-// Lifecycle Rule functions
+//
+// Bucket Lifecycle Configuration.
+// func expandBucketLifecycleRules(ctx context.Context, l []interface{}) []types.LifecycleRule { if len(l) == 0 || l[0] == nil { @@ -1878,7 +2118,7 @@ func expandBucketLifecycleRules(ctx context.Context, l []interface{}) []types.Li } if v, ok := tfMap["expiration"].([]interface{}); ok && len(v) > 0 { - result.Expiration = expandBucketLifecycleRuleExpiration(v) + result.Expiration = expandBucketLifecycleExpiration(v) } var filter types.LifecycleRuleFilter @@ -1904,11 +2144,11 @@ func expandBucketLifecycleRules(ctx context.Context, l []interface{}) []types.Li } if v, ok := tfMap["noncurrent_version_expiration"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.NoncurrentVersionExpiration = expandBucketLifecycleRuleNoncurrentVersionExpiration(v[0].(map[string]interface{})) + result.NoncurrentVersionExpiration = expandBucketNoncurrentVersionExpiration(v[0].(map[string]interface{})) } if v, ok := tfMap["noncurrent_version_transition"].(*schema.Set); ok && v.Len() > 0 { - result.NoncurrentVersionTransitions = expandBucketLifecycleRuleNoncurrentVersionTransitions(v.List()) + result.NoncurrentVersionTransitions = expandBucketNoncurrentVersionTransition(v.List()) } if v, ok := tfMap["enabled"].(bool); ok && v { @@ -1918,7 +2158,7 @@ func expandBucketLifecycleRules(ctx context.Context, l []interface{}) []types.Li } if v, ok := tfMap["transition"].(*schema.Set); ok && v.Len() > 0 { - result.Transitions = expandBucketLifecycleRuleTransitions(v.List()) + result.Transitions = expandBucketTransitions(v.List()) } // As a lifecycle rule requires 1 or more transition/expiration actions, @@ -1934,7 +2174,7 @@ func expandBucketLifecycleRules(ctx context.Context, l []interface{}) []types.Li return results } -func expandBucketLifecycleRuleExpiration(l []interface{}) *types.LifecycleExpiration { +func expandBucketLifecycleExpiration(l []interface{}) *types.LifecycleExpiration { if len(l) == 0 { return nil } @@ -1959,7 +2199,7 @@ func expandBucketLifecycleRuleExpiration(l 
[]interface{}) *types.LifecycleExpira return result } -func expandBucketLifecycleRuleNoncurrentVersionExpiration(m map[string]interface{}) *types.NoncurrentVersionExpiration { +func expandBucketNoncurrentVersionExpiration(m map[string]interface{}) *types.NoncurrentVersionExpiration { if len(m) == 0 { return nil } @@ -1975,7 +2215,7 @@ func expandBucketLifecycleRuleNoncurrentVersionExpiration(m map[string]interface return result } -func expandBucketLifecycleRuleNoncurrentVersionTransitions(l []interface{}) []types.NoncurrentVersionTransition { +func expandBucketNoncurrentVersionTransition(l []interface{}) []types.NoncurrentVersionTransition { if len(l) == 0 || l[0] == nil { return nil } @@ -2004,7 +2244,7 @@ func expandBucketLifecycleRuleNoncurrentVersionTransitions(l []interface{}) []ty return results } -func expandBucketLifecycleRuleTransitions(l []interface{}) []types.Transition { +func expandBucketTransitions(l []interface{}) []types.Transition { if len(l) == 0 || l[0] == nil { return nil } @@ -2036,161 +2276,112 @@ func expandBucketLifecycleRuleTransitions(l []interface{}) []types.Transition { return results } -func flattenBucketLifecycleRuleExpiration(expiration *s3.LifecycleExpiration) []interface{} { - if expiration == nil { - return []interface{}{} - } - - m := make(map[string]interface{}) - - if expiration.Date != nil { - m["date"] = (aws.TimeValue(expiration.Date)).Format("2006-01-02") - } - if expiration.Days != nil { - m["days"] = int(aws.Int64Value(expiration.Days)) - } - if expiration.ExpiredObjectDeleteMarker != nil { - m["expired_object_delete_marker"] = aws.BoolValue(expiration.ExpiredObjectDeleteMarker) - } - - return []interface{}{m} -} - -func flattenBucketLifecycleRules(ctx context.Context, lifecycleRules []*s3.LifecycleRule) []interface{} { - if len(lifecycleRules) == 0 { +func flattenBucketLifecycleRules(ctx context.Context, rules []types.LifecycleRule) []interface{} { + if len(rules) == 0 { return []interface{}{} } var results []interface{} 
- for _, lifecycleRule := range lifecycleRules { - if lifecycleRule == nil { - continue - } - - rule := make(map[string]interface{}) + for _, rule := range rules { + m := make(map[string]interface{}) - // AbortIncompleteMultipartUploadDays - if lifecycleRule.AbortIncompleteMultipartUpload != nil { - if lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation != nil { - rule["abort_incomplete_multipart_upload_days"] = int(aws.Int64Value(lifecycleRule.AbortIncompleteMultipartUpload.DaysAfterInitiation)) + if apiObject := rule.AbortIncompleteMultipartUpload; apiObject != nil { + if apiObject.DaysAfterInitiation != nil { + m["abort_incomplete_multipart_upload_days"] = aws.ToInt32(apiObject.DaysAfterInitiation) } } - // ID - if lifecycleRule.ID != nil { - rule["id"] = aws.StringValue(lifecycleRule.ID) + if rule.Expiration != nil { + m["expiration"] = flattenBucketLifecycleExpiration(rule.Expiration) } - // Filter - if filter := lifecycleRule.Filter; filter != nil { - if filter.And != nil { - // Prefix - if filter.And.Prefix != nil { - rule["prefix"] = aws.StringValue(filter.And.Prefix) - } - // Tag - if len(filter.And.Tags) > 0 { - rule["tags"] = KeyValueTags(ctx, filter.And.Tags).IgnoreAWS().Map() - } - } else { - // Prefix - if filter.Prefix != nil { - rule["prefix"] = aws.StringValue(filter.Prefix) + if filter := rule.Filter; filter != nil { + switch v := filter.(type) { + case *types.LifecycleRuleFilterMemberAnd: + if v := v.Value.Prefix; v != nil { + m["prefix"] = aws.ToString(v) } - // Tag - if filter.Tag != nil { - rule["tags"] = KeyValueTags(ctx, []*s3.Tag{filter.Tag}).IgnoreAWS().Map() + if v := v.Value.Tags; v != nil { + m["tags"] = keyValueTags(ctx, v).IgnoreAWS().Map() } + case *types.LifecycleRuleFilterMemberPrefix: + m["prefix"] = v.Value + case *types.LifecycleRuleFilterMemberTag: + m["tags"] = keyValueTags(ctx, tfslices.Of(v.Value)).IgnoreAWS().Map() } } - // Prefix - if lifecycleRule.Prefix != nil { - rule["prefix"] = 
aws.StringValue(lifecycleRule.Prefix) + if rule.ID != nil { + m["id"] = aws.ToString(rule.ID) } - // Enabled - if lifecycleRule.Status != nil { - if aws.StringValue(lifecycleRule.Status) == s3.ExpirationStatusEnabled { - rule["enabled"] = true - } else { - rule["enabled"] = false - } + if rule.Prefix != nil { + m["prefix"] = aws.ToString(rule.Prefix) } - // Expiration - if lifecycleRule.Expiration != nil { - rule["expiration"] = flattenBucketLifecycleRuleExpiration(lifecycleRule.Expiration) - } + m["enabled"] = rule.Status == types.ExpirationStatusEnabled + + if rule.NoncurrentVersionExpiration != nil { + tfMap := make(map[string]interface{}) - // NoncurrentVersionExpiration - if lifecycleRule.NoncurrentVersionExpiration != nil { - e := make(map[string]interface{}) - if lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays != nil { - e["days"] = int(aws.Int64Value(lifecycleRule.NoncurrentVersionExpiration.NoncurrentDays)) + if apiObject := rule.NoncurrentVersionExpiration.NoncurrentDays; apiObject != nil { + tfMap["days"] = aws.ToInt32(apiObject) } - rule["noncurrent_version_expiration"] = []interface{}{e} + + m["noncurrent_version_expiration"] = []interface{}{tfMap} } - // NoncurrentVersionTransition - if len(lifecycleRule.NoncurrentVersionTransitions) > 0 { - rule["noncurrent_version_transition"] = flattenBucketLifecycleRuleNoncurrentVersionTransitions(lifecycleRule.NoncurrentVersionTransitions) + if rule.NoncurrentVersionTransitions != nil { + m["noncurrent_version_transition"] = flattenBucketNoncurrentVersionTransitions(rule.NoncurrentVersionTransitions) } - // Transition - if len(lifecycleRule.Transitions) > 0 { - rule["transition"] = flattenBucketLifecycleRuleTransitions(lifecycleRule.Transitions) + if rule.Transitions != nil { + m["transition"] = flattenBucketTransitions(rule.Transitions) } - results = append(results, rule) + results = append(results, m) } return results } -func flattenBucketLifecycleRuleNoncurrentVersionTransitions(transitions 
[]*s3.NoncurrentVersionTransition) []interface{} { - if len(transitions) == 0 { +func flattenBucketLifecycleExpiration(expiration *types.LifecycleExpiration) []interface{} { + if expiration == nil { return []interface{}{} } - var results []interface{} - - for _, t := range transitions { - m := make(map[string]interface{}) + m := make(map[string]interface{}) - if t.NoncurrentDays != nil { - m["days"] = int(aws.Int64Value(t.NoncurrentDays)) - } + if expiration.Date != nil { + m["date"] = expiration.Date.Format("2006-01-02") + } - if t.StorageClass != nil { - m["storage_class"] = aws.StringValue(t.StorageClass) - } + if expiration.Days != nil { + m["days"] = aws.ToInt32(expiration.Days) + } - results = append(results, m) + if expiration.ExpiredObjectDeleteMarker != nil { + m["expired_object_delete_marker"] = aws.ToBool(expiration.ExpiredObjectDeleteMarker) } - return results + return []interface{}{m} } -func flattenBucketLifecycleRuleTransitions(transitions []*s3.Transition) []interface{} { +func flattenBucketNoncurrentVersionTransitions(transitions []types.NoncurrentVersionTransition) []interface{} { if len(transitions) == 0 { return []interface{}{} } var results []interface{} - for _, t := range transitions { - m := make(map[string]interface{}) - - if t.Date != nil { - m["date"] = (aws.TimeValue(t.Date)).Format("2006-01-02") - } - if t.Days != nil { - m["days"] = int(aws.Int64Value(t.Days)) + for _, transition := range transitions { + m := map[string]interface{}{ + "storage_class": transition.StorageClass, } - if t.StorageClass != nil { - m["storage_class"] = aws.StringValue(t.StorageClass) + + if transition.NoncurrentDays != nil { + m["days"] = aws.ToInt32(transition.NoncurrentDays) } results = append(results, m) @@ -2199,141 +2390,35 @@ func flattenBucketLifecycleRuleTransitions(transitions []*s3.Transition) []inter return results } -// Logging functions - -func flattenBucketLoggingEnabled(loggingEnabled *s3.LoggingEnabled) []interface{} { - if loggingEnabled == 
nil { +func flattenBucketTransitions(transitions []types.Transition) []interface{} { + if len(transitions) == 0 { return []interface{}{} } - m := make(map[string]interface{}) - - if loggingEnabled.TargetBucket != nil { - m["target_bucket"] = aws.StringValue(loggingEnabled.TargetBucket) - } - if loggingEnabled.TargetPrefix != nil { - m["target_prefix"] = aws.StringValue(loggingEnabled.TargetPrefix) - } - - return []interface{}{m} -} - -func expandBucketServerSideEncryptionRules(l []interface{}) []types.ServerSideEncryptionRule { - var rules []types.ServerSideEncryptionRule + var results []interface{} - for _, tfMapRaw := range l { - tfMap, ok := tfMapRaw.(map[string]interface{}) - if !ok { - continue + for _, transition := range transitions { + m := map[string]interface{}{ + "storage_class": transition.StorageClass, } - rule := types.ServerSideEncryptionRule{} - - if v, ok := tfMap["apply_server_side_encryption_by_default"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rule.ApplyServerSideEncryptionByDefault = expandBucketServerSideEncryptionByDefault(v) + if transition.Date != nil { + m["date"] = transition.Date.Format("2006-01-02") } - if v, ok := tfMap["bucket_key_enabled"].(bool); ok { - rule.BucketKeyEnabled = aws.Bool(v) + if transition.Days != nil { + m["days"] = aws.ToInt32(transition.Days) } - rules = append(rules, rule) - } - - return rules -} - -func expandBucketServerSideEncryptionByDefault(l []interface{}) *types.ServerSideEncryptionByDefault { - if len(l) == 0 || l[0] == nil { - return nil - } - - tfMap, ok := l[0].(map[string]interface{}) - if !ok { - return nil - } - - sse := &types.ServerSideEncryptionByDefault{} - - if v, ok := tfMap["kms_master_key_id"].(string); ok && v != "" { - sse.KMSMasterKeyID = aws.String(v) - } - - if v, ok := tfMap["sse_algorithm"].(string); ok && v != "" { - sse.SSEAlgorithm = types.ServerSideEncryption(v) - } - - return sse -} - -// Object Lock Configuration functions - -func expandBucketObjectLockConfiguration(l 
[]interface{}) *types.ObjectLockConfiguration { - if len(l) == 0 || l[0] == nil { - return nil - } - - tfMap, ok := l[0].(map[string]interface{}) - if !ok { - return nil - } - - apiObject := &types.ObjectLockConfiguration{} - - if v, ok := tfMap["object_lock_enabled"].(string); ok && v != "" { - apiObject.ObjectLockEnabled = types.ObjectLockEnabled(v) - } - - if v, ok := tfMap["rule"].([]interface{}); ok && len(v) > 0 { - tfMap := v[0].(map[string]interface{}) - - if v, ok := tfMap["default_retention"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - tfMap := v[0].(map[string]interface{}) - - apiObject.Rule = &types.ObjectLockRule{ - DefaultRetention: &types.DefaultRetention{}, - } - - if v, ok := tfMap["days"].(int); ok && v > 0 { - apiObject.Rule.DefaultRetention.Days = aws.Int32(int32(v)) - } - if v, ok := tfMap["mode"].(string); ok && v != "" { - apiObject.Rule.DefaultRetention.Mode = types.ObjectLockRetentionMode(v) - } - if v, ok := tfMap["years"].(int); ok && v > 0 { - apiObject.Rule.DefaultRetention.Years = aws.Int32(int32(v)) - } - } + results = append(results, m) } - return apiObject + return results } -func flattenObjectLockConfiguration(conf *s3.ObjectLockConfiguration) []interface{} { - if conf == nil { - return []interface{}{} - } - - mConf := map[string]interface{}{ - "object_lock_enabled": aws.StringValue(conf.ObjectLockEnabled), - } - - if conf.Rule != nil && conf.Rule.DefaultRetention != nil { - mRule := map[string]interface{}{ - "default_retention": []interface{}{ - map[string]interface{}{ - "mode": aws.StringValue(conf.Rule.DefaultRetention.Mode), - "days": int(aws.Int64Value(conf.Rule.DefaultRetention.Days)), - "years": int(aws.Int64Value(conf.Rule.DefaultRetention.Years)), - }, - }, - } - - mConf["rule"] = []interface{}{mRule} - } - - return []interface{}{mConf} -} +// +// Bucket Replication Configuration. 
+// func expandBucketReplicationConfiguration(ctx context.Context, l []interface{}) *types.ReplicationConfiguration { if len(l) == 0 || l[0] == nil { @@ -2352,13 +2437,13 @@ func expandBucketReplicationConfiguration(ctx context.Context, l []interface{}) } if v, ok := tfMap["rules"].(*schema.Set); ok && v.Len() > 0 { - apiObject.Rules = expandBucketReplicationConfigurationRules(ctx, v.List()) + apiObject.Rules = expandBucketReplicationRules(ctx, v.List()) } return apiObject } -func expandBucketReplicationConfigurationRules(ctx context.Context, l []interface{}) []types.ReplicationRule { +func expandBucketReplicationRules(ctx context.Context, l []interface{}) []types.ReplicationRule { var rules []types.ReplicationRule for _, tfMapRaw := range l { @@ -2376,7 +2461,7 @@ func expandBucketReplicationConfigurationRules(ctx context.Context, l []interfac } if v, ok := tfMap["destination"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rule.Destination = expandBucketReplicationConfigurationRulesDestination(v) + rule.Destination = expandBucketDestination(v) } else { rule.Destination = &types.Destination{} } @@ -2386,11 +2471,7 @@ func expandBucketReplicationConfigurationRules(ctx context.Context, l []interfac } if v, ok := tfMap["source_selection_criteria"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rule.SourceSelectionCriteria = expandBucketReplicationConfigurationRulesSourceSelectionCriteria(v) - } - - if v, ok := tfMap["source_selection_criteria"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rule.SourceSelectionCriteria = expandBucketReplicationConfigurationRulesSourceSelectionCriteria(v) + rule.SourceSelectionCriteria = expandBucketSourceSelectionCriteria(v) } if v, ok := tfMap["filter"].([]interface{}); ok && len(v) > 0 && v[0] != nil { @@ -2434,7 +2515,7 @@ func expandBucketReplicationConfigurationRules(ctx context.Context, l []interfac return rules } -func expandBucketReplicationConfigurationRulesDestination(l []interface{}) *types.Destination { 
+func expandBucketDestination(l []interface{}) *types.Destination { if len(l) == 0 || l[0] == nil { return nil } @@ -2465,36 +2546,39 @@ func expandBucketReplicationConfigurationRulesDestination(l []interface{}) *type } if v, ok := tfMap["access_control_translation"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - aclTranslationValues := v[0].(map[string]interface{}) - ruleAclTranslation := &s3.AccessControlTranslation{} - ruleAclTranslation.Owner = aws.String(aclTranslationValues["owner"].(string)) - apiObject.AccessControlTranslation = ruleAclTranslation + tfMap := v[0].(map[string]interface{}) + + apiObject.AccessControlTranslation = &types.AccessControlTranslation{ + Owner: types.OwnerOverride(tfMap["owner"].(string)), + } } - // replication metrics (required for RTC) if v, ok := tfMap["metrics"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - metricsConfig := &s3.Metrics{} - metricsValues := v[0].(map[string]interface{}) - metricsConfig.EventThreshold = &s3.ReplicationTimeValue{} - metricsConfig.Status = aws.String(metricsValues["status"].(string)) - metricsConfig.EventThreshold.Minutes = aws.Int64(int64(metricsValues["minutes"].(int))) - apiObject.Metrics = metricsConfig + tfMap := v[0].(map[string]interface{}) + + apiObject.Metrics = &types.Metrics{ + EventThreshold: &types.ReplicationTimeValue{ + Minutes: aws.Int32(int32(tfMap["minutes"].(int))), + }, + Status: types.MetricsStatus(tfMap["status"].(string)), + } } - // replication time control (RTC) if v, ok := tfMap["replication_time"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rtcValues := v[0].(map[string]interface{}) - rtcConfig := &s3.ReplicationTime{} - rtcConfig.Status = aws.String(rtcValues["status"].(string)) - rtcConfig.Time = &s3.ReplicationTimeValue{} - rtcConfig.Time.Minutes = aws.Int64(int64(rtcValues["minutes"].(int))) - apiObject.ReplicationTime = rtcConfig + tfMap := v[0].(map[string]interface{}) + + apiObject.ReplicationTime = &types.ReplicationTime{ + Status: 
types.ReplicationTimeStatus(tfMap["status"].(string)), + Time: &types.ReplicationTimeValue{ + Minutes: aws.Int32(int32(tfMap["minutes"].(int))), + }, + } } return apiObject } -func expandBucketReplicationConfigurationRulesSourceSelectionCriteria(l []interface{}) *types.SourceSelectionCriteria { +func expandBucketSourceSelectionCriteria(l []interface{}) *types.SourceSelectionCriteria { if len(l) == 0 || l[0] == nil { return nil } @@ -2522,102 +2606,148 @@ func expandBucketReplicationConfigurationRulesSourceSelectionCriteria(l []interf return result } -func flattenBucketReplicationConfiguration(ctx context.Context, r *s3.ReplicationConfiguration) []interface{} { - if r == nil { +func flattenBucketReplicationConfiguration(ctx context.Context, apiObject *types.ReplicationConfiguration) []interface{} { + if apiObject == nil { return []interface{}{} } m := make(map[string]interface{}) - if r.Role != nil { - m["role"] = aws.StringValue(r.Role) + if apiObject.Role != nil { + m["role"] = aws.ToString(apiObject.Role) } - if len(r.Rules) > 0 { - m["rules"] = flattenBucketReplicationConfigurationReplicationRules(ctx, r.Rules) + if len(apiObject.Rules) > 0 { + m["rules"] = flattenBucketReplicationRules(ctx, apiObject.Rules) } return []interface{}{m} } -func flattenBucketReplicationConfigurationReplicationRuleDestination(d *s3.Destination) []interface{} { - if d == nil { +func flattenBucketReplicationRules(ctx context.Context, rules []types.ReplicationRule) []interface{} { + if len(rules) == 0 { return []interface{}{} } - m := make(map[string]interface{}) + var results []interface{} + + for _, rule := range rules { + m := map[string]interface{}{ + "status": rule.Status, + } + + if apiObject := rule.DeleteMarkerReplication; apiObject != nil { + if apiObject.Status == types.DeleteMarkerReplicationStatusEnabled { + m["delete_marker_replication_status"] = apiObject.Status + } + } + + if rule.Destination != nil { + m["destination"] = flattenBucketDestination(rule.Destination) + } - 
if d.Bucket != nil { - m["bucket"] = aws.StringValue(d.Bucket) + if rule.Filter != nil { + m["filter"] = flattenBucketReplicationRuleFilter(ctx, rule.Filter) + } + + if rule.ID != nil { + m["id"] = aws.ToString(rule.ID) + } + + if rule.Priority != nil { + m["priority"] = aws.ToInt32(rule.Priority) + } + + if rule.Prefix != nil { + m["prefix"] = aws.ToString(rule.Prefix) + } + + if rule.SourceSelectionCriteria != nil { + m["source_selection_criteria"] = flattenBucketSourceSelectionCriteria(rule.SourceSelectionCriteria) + } + + results = append(results, m) } - if d.StorageClass != nil { - m["storage_class"] = aws.StringValue(d.StorageClass) + return results +} + +func flattenBucketDestination(dest *types.Destination) []interface{} { + if dest == nil { + return []interface{}{} } - if d.ReplicationTime != nil { - rtc := map[string]interface{}{ - "minutes": int(aws.Int64Value(d.ReplicationTime.Time.Minutes)), - "status": aws.StringValue(d.ReplicationTime.Status), - } - m["replication_time"] = []interface{}{rtc} + m := map[string]interface{}{ + "storage_class": dest.StorageClass, } - if d.Metrics != nil { - metrics := map[string]interface{}{ - "status": aws.StringValue(d.Metrics.Status), + if apiObject := dest.AccessControlTranslation; apiObject != nil { + tfMap := map[string]interface{}{ + "owner": apiObject.Owner, } - if d.Metrics.EventThreshold != nil { - metrics["minutes"] = int(aws.Int64Value(d.Metrics.EventThreshold.Minutes)) - } + m["access_control_translation"] = []interface{}{tfMap} + } + + if dest.Account != nil { + m["account"] = aws.ToString(dest.Account) + } - m["metrics"] = []interface{}{metrics} + if dest.Bucket != nil { + m["bucket"] = aws.ToString(dest.Bucket) } - if d.EncryptionConfiguration != nil { - if d.EncryptionConfiguration.ReplicaKmsKeyID != nil { - m["replica_kms_key_id"] = aws.StringValue(d.EncryptionConfiguration.ReplicaKmsKeyID) + + if apiObject := dest.EncryptionConfiguration; apiObject != nil { + if apiObject.ReplicaKmsKeyID != nil { + 
m["replica_kms_key_id"] = aws.ToString(apiObject.ReplicaKmsKeyID) } } - if d.Account != nil { - m["account_id"] = aws.StringValue(d.Account) + if apiObject := dest.Metrics; apiObject != nil { + tfMap := map[string]interface{}{ + "status": apiObject.Status, + } + + if apiObject.EventThreshold != nil { + tfMap["minutes"] = aws.ToInt32(apiObject.EventThreshold.Minutes) + } + + m["metrics"] = []interface{}{tfMap} } - if d.AccessControlTranslation != nil { - rdt := map[string]interface{}{ - "owner": aws.StringValue(d.AccessControlTranslation.Owner), + if apiObject := dest.ReplicationTime; apiObject != nil { + tfMap := map[string]interface{}{ + "minutes": aws.ToInt32(apiObject.Time.Minutes), + "status": apiObject.Status, } - m["access_control_translation"] = []interface{}{rdt} + + m["replication_time"] = []interface{}{tfMap} } return []interface{}{m} } -func flattenBucketReplicationConfigurationReplicationRuleFilter(ctx context.Context, filter *s3.ReplicationRuleFilter) []interface{} { +func flattenBucketReplicationRuleFilter(ctx context.Context, filter types.ReplicationRuleFilter) []interface{} { if filter == nil { return []interface{}{} } m := make(map[string]interface{}) - if filter.Prefix != nil { - m["prefix"] = aws.StringValue(filter.Prefix) - } - - if filter.Tag != nil { - m["tags"] = KeyValueTags(ctx, []*s3.Tag{filter.Tag}).IgnoreAWS().Map() - } - - if filter.And != nil { - m["prefix"] = aws.StringValue(filter.And.Prefix) - m["tags"] = KeyValueTags(ctx, filter.And.Tags).IgnoreAWS().Map() + switch v := filter.(type) { + case *types.ReplicationRuleFilterMemberAnd: + m["prefix"] = aws.ToString(v.Value.Prefix) + m["tags"] = keyValueTags(ctx, v.Value.Tags).IgnoreAWS().Map() + case *types.ReplicationRuleFilterMemberPrefix: + m["prefix"] = v.Value + case *types.ReplicationRuleFilterMemberTag: + m["tags"] = keyValueTags(ctx, tfslices.Of(v.Value)).IgnoreAWS().Map() } return []interface{}{m} } -func 
flattenBucketReplicationConfigurationReplicationRuleSourceSelectionCriteria(ssc *s3.SourceSelectionCriteria) []interface{} { +func flattenBucketSourceSelectionCriteria(ssc *types.SourceSelectionCriteria) []interface{} { if ssc == nil { return []interface{}{} } @@ -2625,150 +2755,122 @@ func flattenBucketReplicationConfigurationReplicationRuleSourceSelectionCriteria m := make(map[string]interface{}) if ssc.SseKmsEncryptedObjects != nil { - m["sse_kms_encrypted_objects"] = flattenBucketReplicationConfigurationReplicationRuleSourceSelectionCriteriaSSEKMSEncryptedObjects(ssc.SseKmsEncryptedObjects) + m["sse_kms_encrypted_objects"] = flattenBucketSSEKMSEncryptedObjects(ssc.SseKmsEncryptedObjects) } return []interface{}{m} } -func flattenBucketReplicationConfigurationReplicationRuleSourceSelectionCriteriaSSEKMSEncryptedObjects(objs *s3.SseKmsEncryptedObjects) []interface{} { - if objs == nil { +func flattenBucketSSEKMSEncryptedObjects(objects *types.SseKmsEncryptedObjects) []interface{} { + if objects == nil { return []interface{}{} } m := make(map[string]interface{}) - if aws.StringValue(objs.Status) == s3.SseKmsEncryptedObjectsStatusEnabled { + if objects.Status == types.SseKmsEncryptedObjectsStatusEnabled { m["enabled"] = true - } else if aws.StringValue(objs.Status) == s3.SseKmsEncryptedObjectsStatusDisabled { + } else if objects.Status == types.SseKmsEncryptedObjectsStatusDisabled { m["enabled"] = false } return []interface{}{m} } -func flattenBucketReplicationConfigurationReplicationRules(ctx context.Context, rules []*s3.ReplicationRule) []interface{} { - var results []interface{} +// +// Bucket Server-side Encryption Configuration. 
+// - for _, rule := range rules { - if rule == nil { +func expandBucketServerSideEncryptionRules(l []interface{}) []types.ServerSideEncryptionRule { + var rules []types.ServerSideEncryptionRule + + for _, tfMapRaw := range l { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { continue } - m := make(map[string]interface{}) + rule := types.ServerSideEncryptionRule{} - if rule.Destination != nil { - m["destination"] = flattenBucketReplicationConfigurationReplicationRuleDestination(rule.Destination) + if v, ok := tfMap["apply_server_side_encryption_by_default"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + rule.ApplyServerSideEncryptionByDefault = expandBucketServerSideEncryptionByDefault(v) } - if rule.ID != nil { - m["id"] = aws.StringValue(rule.ID) + if v, ok := tfMap["bucket_key_enabled"].(bool); ok { + rule.BucketKeyEnabled = aws.Bool(v) } - if rule.Prefix != nil { - m["prefix"] = aws.StringValue(rule.Prefix) - } - if rule.Status != nil { - m["status"] = aws.StringValue(rule.Status) - } - if rule.SourceSelectionCriteria != nil { - m["source_selection_criteria"] = flattenBucketReplicationConfigurationReplicationRuleSourceSelectionCriteria(rule.SourceSelectionCriteria) - } + rules = append(rules, rule) + } - if rule.Priority != nil { - m["priority"] = int(aws.Int64Value(rule.Priority)) - } + return rules +} - if rule.Filter != nil { - m["filter"] = flattenBucketReplicationConfigurationReplicationRuleFilter(ctx, rule.Filter) - } +func expandBucketServerSideEncryptionByDefault(l []interface{}) *types.ServerSideEncryptionByDefault { + if len(l) == 0 || l[0] == nil { + return nil + } - if rule.DeleteMarkerReplication != nil { - if rule.DeleteMarkerReplication.Status != nil && aws.StringValue(rule.DeleteMarkerReplication.Status) == s3.DeleteMarkerReplicationStatusEnabled { - m["delete_marker_replication_status"] = aws.StringValue(rule.DeleteMarkerReplication.Status) - } - } + tfMap, ok := l[0].(map[string]interface{}) + if !ok { + return nil + } - 
results = append(results, m) + sse := &types.ServerSideEncryptionByDefault{} + + if v, ok := tfMap["kms_master_key_id"].(string); ok && v != "" { + sse.KMSMasterKeyID = aws.String(v) } - return results -} + if v, ok := tfMap["sse_algorithm"].(string); ok && v != "" { + sse.SSEAlgorithm = types.ServerSideEncryption(v) + } -// Server Side Encryption Configuration functions + return sse +} -func flattenServerSideEncryptionConfiguration(c *s3.ServerSideEncryptionConfiguration) []interface{} { - if c == nil { +func flattenBucketServerSideEncryptionConfiguration(apiObject *types.ServerSideEncryptionConfiguration) []interface{} { + if apiObject == nil { return []interface{}{} } m := map[string]interface{}{ - "rule": flattenServerSideEncryptionConfigurationRules(c.Rules), + "rule": flattenBucketServerSideEncryptionRules(apiObject.Rules), } return []interface{}{m} } -func flattenServerSideEncryptionConfigurationRules(rules []*s3.ServerSideEncryptionRule) []interface{} { +func flattenBucketServerSideEncryptionRules(rules []types.ServerSideEncryptionRule) []interface{} { var results []interface{} for _, rule := range rules { m := make(map[string]interface{}) - if rule.BucketKeyEnabled != nil { - m["bucket_key_enabled"] = aws.BoolValue(rule.BucketKeyEnabled) - } - - if rule.ApplyServerSideEncryptionByDefault != nil { + if apiObject := rule.ApplyServerSideEncryptionByDefault; apiObject != nil { m["apply_server_side_encryption_by_default"] = []interface{}{ map[string]interface{}{ - "kms_master_key_id": aws.StringValue(rule.ApplyServerSideEncryptionByDefault.KMSMasterKeyID), - "sse_algorithm": aws.StringValue(rule.ApplyServerSideEncryptionByDefault.SSEAlgorithm), + "kms_master_key_id": aws.ToString(apiObject.KMSMasterKeyID), + "sse_algorithm": apiObject.SSEAlgorithm, }, } } + if rule.BucketKeyEnabled != nil { + m["bucket_key_enabled"] = aws.ToBool(rule.BucketKeyEnabled) + } + results = append(results, m) } return results } -// Versioning functions - -func 
expandBucketVersioningConfigurationCreate(l []interface{}) *types.VersioningConfiguration { - if len(l) == 0 || l[0] == nil { - return nil - } - - tfMap, ok := l[0].(map[string]interface{}) - if !ok { - return nil - } - - apiObject := &types.VersioningConfiguration{} - - // Only set and return a non-nil VersioningConfiguration with at least one of - // MFADelete or Status enabled as the PutBucketVersioning API request - // does not need to be made for new buckets that don't require versioning. - // Reference: https://github.com/hashicorp/terraform-provider-aws/issues/4494. - - if v, ok := tfMap["enabled"].(bool); ok && v { - apiObject.Status = types.BucketVersioningStatusEnabled - } - - if v, ok := tfMap["mfa_delete"].(bool); ok && v { - apiObject.MFADelete = types.MFADeleteEnabled - } - - if itypes.IsZero(&apiObject) { - return nil - } +// +// Bucket Object Lock Configuration. +// - return apiObject -} - -func expandBucketVersioningConfigurationUpdate(l []interface{}) *types.VersioningConfiguration { +func expandBucketObjectLockConfiguration(l []interface{}) *types.ObjectLockConfiguration { if len(l) == 0 || l[0] == nil { return nil } @@ -2778,209 +2880,62 @@ func expandBucketVersioningConfigurationUpdate(l []interface{}) *types.Versionin return nil } - apiObject := &types.VersioningConfiguration{} - - if v, ok := tfMap["enabled"].(bool); ok { - if v { - apiObject.Status = types.BucketVersioningStatusEnabled - } else { - apiObject.Status = types.BucketVersioningStatusSuspended - } - } - - if v, ok := tfMap["mfa_delete"].(bool); ok { - if v { - apiObject.MFADelete = types.MFADeleteEnabled - } else { - apiObject.MFADelete = types.MFADeleteDisabled - } - } - - return apiObject -} - -func flattenVersioning(versioning *s3.GetBucketVersioningOutput) []interface{} { - if versioning == nil { - return []interface{}{} - } - - vc := make(map[string]interface{}) - - if aws.StringValue(versioning.Status) == s3.BucketVersioningStatusEnabled { - vc["enabled"] = true - } else { - 
vc["enabled"] = false - } - - if aws.StringValue(versioning.MFADelete) == s3.MFADeleteEnabled { - vc["mfa_delete"] = true - } else { - vc["mfa_delete"] = false - } - - return []interface{}{vc} -} - -// Website functions - -func expandBucketWebsiteConfiguration(l []interface{}) (*types.WebsiteConfiguration, error) { - if len(l) == 0 || l[0] == nil { - return nil, nil - } - - tfMap, ok := l[0].(map[string]interface{}) - if !ok { - return nil, nil - } - - websiteConfig := &types.WebsiteConfiguration{} + apiObject := &types.ObjectLockConfiguration{} - if v, ok := tfMap["index_document"].(string); ok && v != "" { - websiteConfig.IndexDocument = &types.IndexDocument{ - Suffix: aws.String(v), - } + if v, ok := tfMap["object_lock_enabled"].(string); ok && v != "" { + apiObject.ObjectLockEnabled = types.ObjectLockEnabled(v) } - if v, ok := tfMap["error_document"].(string); ok && v != "" { - websiteConfig.ErrorDocument = &types.ErrorDocument{ - Key: aws.String(v), - } - } + if v, ok := tfMap["rule"].([]interface{}); ok && len(v) > 0 { + tfMap := v[0].(map[string]interface{}) - if v, ok := tfMap["redirect_all_requests_to"].(string); ok && v != "" { - redirect, err := url.Parse(v) - if err == nil && redirect.Scheme != "" { - var buf bytes.Buffer + if v, ok := tfMap["default_retention"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + tfMap := v[0].(map[string]interface{}) - buf.WriteString(redirect.Host) - if redirect.Path != "" { - buf.WriteString(redirect.Path) - } - if redirect.RawQuery != "" { - buf.WriteString("?") - buf.WriteString(redirect.RawQuery) + apiObject.Rule = &types.ObjectLockRule{ + DefaultRetention: &types.DefaultRetention{}, } - websiteConfig.RedirectAllRequestsTo = &types.RedirectAllRequestsTo{ - HostName: aws.String(buf.String()), - Protocol: types.Protocol(redirect.Scheme), + + if v, ok := tfMap["days"].(int); ok && v > 0 { + apiObject.Rule.DefaultRetention.Days = aws.Int32(int32(v)) } - } else { - websiteConfig.RedirectAllRequestsTo = 
&types.RedirectAllRequestsTo{ - HostName: aws.String(v), + if v, ok := tfMap["mode"].(string); ok && v != "" { + apiObject.Rule.DefaultRetention.Mode = types.ObjectLockRetentionMode(v) } - } - } - - if v, ok := tfMap["routing_rules"].(string); ok && v != "" { - var routingRules []types.RoutingRule - if err := json.Unmarshal([]byte(v), &routingRules); err != nil { - return nil, err - } - websiteConfig.RoutingRules = routingRules - } - - return websiteConfig, nil -} - -func flattenBucketWebsite(ws *s3.GetBucketWebsiteOutput) ([]interface{}, error) { - if ws == nil { - return []interface{}{}, nil - } - - m := make(map[string]interface{}) - - if v := ws.IndexDocument; v != nil { - m["index_document"] = aws.StringValue(v.Suffix) - } - - if v := ws.ErrorDocument; v != nil { - m["error_document"] = aws.StringValue(v.Key) - } - - if v := ws.RedirectAllRequestsTo; v != nil { - if v.Protocol == nil { - m["redirect_all_requests_to"] = aws.StringValue(v.HostName) - } else { - var host string - var path string - var query string - parsedHostName, err := url.Parse(aws.StringValue(v.HostName)) - if err == nil { - host = parsedHostName.Host - path = parsedHostName.Path - query = parsedHostName.RawQuery - } else { - host = aws.StringValue(v.HostName) - path = "" + if v, ok := tfMap["years"].(int); ok && v > 0 { + apiObject.Rule.DefaultRetention.Years = aws.Int32(int32(v)) } - - m["redirect_all_requests_to"] = (&url.URL{ - Host: host, - Path: path, - Scheme: aws.StringValue(v.Protocol), - RawQuery: query, - }).String() } } - if v := ws.RoutingRules; v != nil { - rr, err := normalizeRoutingRules(v) - if err != nil { - return nil, fmt.Errorf("while marshaling routing rules: %w", err) - } - m["routing_rules"] = rr - } - - // We have special handling for the website configuration, - // so only return the configuration if there is any - if len(m) == 0 { - return []interface{}{}, nil - } - - return []interface{}{m}, nil + return apiObject } -func normalizeRoutingRules(w []*s3.RoutingRule) 
(string, error) { - withNulls, err := json.Marshal(w) - if err != nil { - return "", err - } - - var rules []map[string]interface{} - if err := json.Unmarshal(withNulls, &rules); err != nil { - return "", err - } - - var cleanRules []map[string]interface{} - for _, rule := range rules { - cleanRules = append(cleanRules, removeNil(rule)) +func flattenObjectLockConfiguration(apiObject *types.ObjectLockConfiguration) []interface{} { + if apiObject == nil { + return []interface{}{} } - withoutNulls, err := json.Marshal(cleanRules) - if err != nil { - return "", err + m := map[string]interface{}{ + "object_lock_enabled": apiObject.ObjectLockEnabled, } - return string(withoutNulls), nil -} - -func removeNil(data map[string]interface{}) map[string]interface{} { - withoutNil := make(map[string]interface{}) - - for k, v := range data { - if v == nil { - continue + if apiObject.Rule != nil && apiObject.Rule.DefaultRetention != nil { + apiObject := apiObject.Rule.DefaultRetention + tfMap := map[string]interface{}{ + "default_retention": []interface{}{ + map[string]interface{}{ + "days": aws.ToInt32(apiObject.Days), + "mode": apiObject.Mode, + "years": aws.ToInt32(apiObject.Years), + }, + }, } - switch v := v.(type) { - case map[string]interface{}: - withoutNil[k] = removeNil(v) - default: - withoutNil[k] = v - } + m["rule"] = []interface{}{tfMap} } - return withoutNil + return []interface{}{m} } // validBucketName validates any S3 bucket name that is not inside the us-east-1 region. 
diff --git a/internal/service/s3/bucket_acl.go b/internal/service/s3/bucket_acl.go index feb63829d38..689055b737a 100644 --- a/internal/service/s3/bucket_acl.go +++ b/internal/service/s3/bucket_acl.go @@ -152,7 +152,7 @@ func resourceBucketACLCreate(ctx context.Context, d *schema.ResourceData, meta i } if v, ok := d.GetOk("access_control_policy"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - input.AccessControlPolicy = expandBucketACLAccessControlPolicy(v.([]interface{})) + input.AccessControlPolicy = expandAccessControlPolicy(v.([]interface{})) } _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { @@ -188,7 +188,7 @@ func resourceBucketACLRead(ctx context.Context, d *schema.ResourceData, meta int return diag.FromErr(err) } - output, err := findBucketACL(ctx, conn, bucket, expectedBucketOwner) + bucketACL, err := findBucketACL(ctx, conn, bucket, expectedBucketOwner) if !d.IsNewResource() && tfresource.NotFound(err) { log.Printf("[WARN] S3 Bucket ACL (%s) not found, removing from state", d.Id()) @@ -200,7 +200,7 @@ func resourceBucketACLRead(ctx context.Context, d *schema.ResourceData, meta int return diag.Errorf("reading S3 Bucket ACL (%s): %s", d.Id(), err) } - if err := d.Set("access_control_policy", flattenBucketACLAccessControlPolicy(output)); err != nil { + if err := d.Set("access_control_policy", flattenBucketACL(bucketACL)); err != nil { return diag.Errorf("setting access_control_policy: %s", err) } d.Set("acl", acl) @@ -226,7 +226,7 @@ func resourceBucketACLUpdate(ctx context.Context, d *schema.ResourceData, meta i } if d.HasChange("access_control_policy") { - input.AccessControlPolicy = expandBucketACLAccessControlPolicy(d.Get("access_control_policy").([]interface{})) + input.AccessControlPolicy = expandAccessControlPolicy(d.Get("access_control_policy").([]interface{})) } if d.HasChange("acl") { @@ -248,7 +248,35 @@ func resourceBucketACLUpdate(ctx context.Context, d 
*schema.ResourceData, meta i return resourceBucketACLRead(ctx, d, meta) } -func expandBucketACLAccessControlPolicy(l []interface{}) *types.AccessControlPolicy { +func findBucketACL(ctx context.Context, conn *s3.Client, bucket, expectedBucketOwner string) (*s3.GetBucketAclOutput, error) { + input := &s3.GetBucketAclInput{ + Bucket: aws.String(bucket), + } + if expectedBucketOwner != "" { + input.ExpectedBucketOwner = aws.String(expectedBucketOwner) + } + + output, err := conn.GetBucketAcl(ctx, input) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func expandAccessControlPolicy(l []interface{}) *types.AccessControlPolicy { if len(l) == 0 || l[0] == nil { return nil } @@ -261,17 +289,17 @@ func expandBucketACLAccessControlPolicy(l []interface{}) *types.AccessControlPol result := &types.AccessControlPolicy{} if v, ok := tfMap["grant"].(*schema.Set); ok && v.Len() > 0 { - result.Grants = expandBucketACLAccessControlPolicyGrants(v.List()) + result.Grants = expandGrants(v.List()) } if v, ok := tfMap["owner"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.Owner = expandBucketACLAccessControlPolicyOwner(v) + result.Owner = expandOwner(v) } return result } -func expandBucketACLAccessControlPolicyGrants(l []interface{}) []types.Grant { +func expandGrants(l []interface{}) []types.Grant { var grants []types.Grant for _, tfMapRaw := range l { @@ -283,7 +311,7 @@ func expandBucketACLAccessControlPolicyGrants(l []interface{}) []types.Grant { grant := types.Grant{} if v, ok := tfMap["grantee"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - grant.Grantee = expandBucketACLAccessControlPolicyGrantsGrantee(v) + grant.Grantee = expandACLGrantee(v) } if v, ok := tfMap["permission"].(string); ok && v != "" { @@ -296,7 +324,7 @@ 
func expandBucketACLAccessControlPolicyGrants(l []interface{}) []types.Grant { return grants } -func expandBucketACLAccessControlPolicyGrantsGrantee(l []interface{}) *types.Grantee { +func expandACLGrantee(l []interface{}) *types.Grantee { if len(l) == 0 || l[0] == nil { return nil } @@ -327,7 +355,7 @@ func expandBucketACLAccessControlPolicyGrantsGrantee(l []interface{}) *types.Gra return result } -func expandBucketACLAccessControlPolicyOwner(l []interface{}) *types.Owner { +func expandOwner(l []interface{}) *types.Owner { if len(l) == 0 || l[0] == nil { return nil } @@ -350,25 +378,25 @@ func expandBucketACLAccessControlPolicyOwner(l []interface{}) *types.Owner { return owner } -func flattenBucketACLAccessControlPolicy(output *s3.GetBucketAclOutput) []interface{} { - if output == nil { +func flattenBucketACL(apiObject *s3.GetBucketAclOutput) []interface{} { + if apiObject == nil { return []interface{}{} } m := make(map[string]interface{}) - if len(output.Grants) > 0 { - m["grant"] = flattenBucketACLAccessControlPolicyGrants(output.Grants) + if len(apiObject.Grants) > 0 { + m["grant"] = flattenGrants(apiObject.Grants) } - if output.Owner != nil { - m["owner"] = flattenBucketACLAccessControlPolicyOwner(output.Owner) + if apiObject.Owner != nil { + m["owner"] = flattenOwner(apiObject.Owner) } return []interface{}{m} } -func flattenBucketACLAccessControlPolicyGrants(grants []types.Grant) []interface{} { +func flattenGrants(grants []types.Grant) []interface{} { var results []interface{} for _, grant := range grants { @@ -377,7 +405,7 @@ func flattenBucketACLAccessControlPolicyGrants(grants []types.Grant) []interface } if grant.Grantee != nil { - m["grantee"] = flattenBucketACLAccessControlPolicyGrantsGrantee(grant.Grantee) + m["grantee"] = flattenACLGrantee(grant.Grantee) } results = append(results, m) @@ -386,7 +414,7 @@ func flattenBucketACLAccessControlPolicyGrants(grants []types.Grant) []interface return results } -func 
flattenBucketACLAccessControlPolicyGrantsGrantee(grantee *types.Grantee) []interface{} { +func flattenACLGrantee(grantee *types.Grantee) []interface{} { if grantee == nil { return []interface{}{} } @@ -414,7 +442,7 @@ func flattenBucketACLAccessControlPolicyGrantsGrantee(grantee *types.Grantee) [] return []interface{}{m} } -func flattenBucketACLAccessControlPolicyOwner(owner *types.Owner) []interface{} { +func flattenOwner(owner *types.Owner) []interface{} { if owner == nil { return []interface{}{} } @@ -502,34 +530,6 @@ func BucketACLParseResourceID(id string) (string, string, string, error) { "or BUCKET%[2]sEXPECTED_BUCKET_OWNER%[2]sACL", id, BucketACLSeparator) } -func findBucketACL(ctx context.Context, conn *s3.Client, bucket, expectedBucketOwner string) (*s3.GetBucketAclOutput, error) { - input := &s3.GetBucketAclInput{ - Bucket: aws.String(bucket), - } - if expectedBucketOwner != "" { - input.ExpectedBucketOwner = aws.String(expectedBucketOwner) - } - - output, err := conn.GetBucketAcl(ctx, input) - - if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output, nil -} - // These should be defined in the AWS SDK for Go. There is an issue, https://github.com/aws/aws-sdk-go/issues/2683. 
const ( bucketCannedACLExecRead = "aws-exec-read" diff --git a/internal/service/s3/bucket_cors_configuration.go b/internal/service/s3/bucket_cors_configuration.go index b3801e59708..00a11ce8905 100644 --- a/internal/service/s3/bucket_cors_configuration.go +++ b/internal/service/s3/bucket_cors_configuration.go @@ -220,6 +220,34 @@ func resourceBucketCorsConfigurationDelete(ctx context.Context, d *schema.Resour return nil } +func findCORSRules(ctx context.Context, conn *s3.Client, bucket, expectedBucketOwner string) ([]types.CORSRule, error) { + input := &s3.GetBucketCorsInput{ + Bucket: aws.String(bucket), + } + if expectedBucketOwner != "" { + input.ExpectedBucketOwner = aws.String(expectedBucketOwner) + } + + output, err := conn.GetBucketCors(ctx, input) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeNoSuchCORSConfiguration) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || len(output.CORSRules) == 0 { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.CORSRules, nil +} + func expandCORSRules(l []interface{}) []types.CORSRule { if len(l) == 0 { return nil @@ -298,31 +326,3 @@ func flattenCORSRules(rules []types.CORSRule) []interface{} { return results } - -func findCORSRules(ctx context.Context, conn *s3.Client, bucket, expectedBucketOwner string) ([]types.CORSRule, error) { - input := &s3.GetBucketCorsInput{ - Bucket: aws.String(bucket), - } - if expectedBucketOwner != "" { - input.ExpectedBucketOwner = aws.String(expectedBucketOwner) - } - - output, err := conn.GetBucketCors(ctx, input) - - if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeNoSuchCORSConfiguration) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil || len(output.CORSRules) == 0 { - return nil, tfresource.NewEmptyResultError(input) - } - - return 
output.CORSRules, nil -} diff --git a/internal/service/s3/bucket_lifecycle_configuration.go b/internal/service/s3/bucket_lifecycle_configuration.go index 3425d0e5e74..957f25f1298 100644 --- a/internal/service/s3/bucket_lifecycle_configuration.go +++ b/internal/service/s3/bucket_lifecycle_configuration.go @@ -557,11 +557,11 @@ func expandLifecycleRules(ctx context.Context, l []interface{}) []types.Lifecycl result := types.LifecycleRule{} if v, ok := tfMap["abort_incomplete_multipart_upload"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.AbortIncompleteMultipartUpload = expandLifecycleRuleAbortIncompleteMultipartUpload(v[0].(map[string]interface{})) + result.AbortIncompleteMultipartUpload = expandAbortIncompleteMultipartUpload(v[0].(map[string]interface{})) } if v, ok := tfMap["expiration"].([]interface{}); ok && len(v) > 0 { - result.Expiration = expandLifecycleRuleExpiration(v) + result.Expiration = expandLifecycleExpiration(v) } if v, ok := tfMap["filter"].([]interface{}); ok && len(v) > 0 { @@ -586,11 +586,11 @@ func expandLifecycleRules(ctx context.Context, l []interface{}) []types.Lifecycl } if v, ok := tfMap["noncurrent_version_expiration"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.NoncurrentVersionExpiration = expandLifecycleRuleNoncurrentVersionExpiration(v[0].(map[string]interface{})) + result.NoncurrentVersionExpiration = expandNoncurrentVersionExpiration(v[0].(map[string]interface{})) } if v, ok := tfMap["noncurrent_version_transition"].(*schema.Set); ok && v.Len() > 0 { - result.NoncurrentVersionTransitions = expandLifecycleRuleNoncurrentVersionTransitions(v.List()) + result.NoncurrentVersionTransitions = expandNoncurrentVersionTransitions(v.List()) } if v, ok := tfMap["status"].(string); ok && v != "" { @@ -598,7 +598,7 @@ func expandLifecycleRules(ctx context.Context, l []interface{}) []types.Lifecycl } if v, ok := tfMap["transition"].(*schema.Set); ok && v.Len() > 0 { - result.Transitions = 
expandLifecycleRuleTransitions(v.List()) + result.Transitions = expandTransitions(v.List()) } results = append(results, result) @@ -607,7 +607,7 @@ func expandLifecycleRules(ctx context.Context, l []interface{}) []types.Lifecycl return results } -func expandLifecycleRuleAbortIncompleteMultipartUpload(m map[string]interface{}) *types.AbortIncompleteMultipartUpload { +func expandAbortIncompleteMultipartUpload(m map[string]interface{}) *types.AbortIncompleteMultipartUpload { if len(m) == 0 { return nil } @@ -621,7 +621,7 @@ func expandLifecycleRuleAbortIncompleteMultipartUpload(m map[string]interface{}) return result } -func expandLifecycleRuleExpiration(l []interface{}) *types.LifecycleExpiration { +func expandLifecycleExpiration(l []interface{}) *types.LifecycleExpiration { if len(l) == 0 { return nil } @@ -747,7 +747,7 @@ func expandLifecycleRuleFilterMemberTag(m map[string]interface{}) *types.Lifecyc return result } -func expandLifecycleRuleNoncurrentVersionExpiration(m map[string]interface{}) *types.NoncurrentVersionExpiration { +func expandNoncurrentVersionExpiration(m map[string]interface{}) *types.NoncurrentVersionExpiration { if len(m) == 0 { return nil } @@ -765,7 +765,7 @@ func expandLifecycleRuleNoncurrentVersionExpiration(m map[string]interface{}) *t return result } -func expandLifecycleRuleNoncurrentVersionTransitions(l []interface{}) []types.NoncurrentVersionTransition { +func expandNoncurrentVersionTransitions(l []interface{}) []types.NoncurrentVersionTransition { if len(l) == 0 || l[0] == nil { return nil } @@ -799,7 +799,7 @@ func expandLifecycleRuleNoncurrentVersionTransitions(l []interface{}) []types.No return results } -func expandLifecycleRuleTransitions(l []interface{}) []types.Transition { +func expandTransitions(l []interface{}) []types.Transition { if len(l) == 0 || l[0] == nil { return nil } @@ -850,11 +850,11 @@ func flattenLifecycleRules(ctx context.Context, rules []types.LifecycleRule) []i } if rule.AbortIncompleteMultipartUpload != nil { 
- m["abort_incomplete_multipart_upload"] = flattenLifecycleRuleAbortIncompleteMultipartUpload(rule.AbortIncompleteMultipartUpload) + m["abort_incomplete_multipart_upload"] = flattenAbortIncompleteMultipartUpload(rule.AbortIncompleteMultipartUpload) } if rule.Expiration != nil { - m["expiration"] = flattenLifecycleRuleExpiration(rule.Expiration) + m["expiration"] = flattenLifecycleExpiration(rule.Expiration) } if rule.Filter != nil { @@ -866,11 +866,11 @@ func flattenLifecycleRules(ctx context.Context, rules []types.LifecycleRule) []i } if rule.NoncurrentVersionExpiration != nil { - m["noncurrent_version_expiration"] = flattenLifecycleRuleNoncurrentVersionExpiration(rule.NoncurrentVersionExpiration) + m["noncurrent_version_expiration"] = flattenNoncurrentVersionExpiration(rule.NoncurrentVersionExpiration) } if rule.NoncurrentVersionTransitions != nil { - m["noncurrent_version_transition"] = flattenLifecycleRuleNoncurrentVersionTransitions(rule.NoncurrentVersionTransitions) + m["noncurrent_version_transition"] = flattenNoncurrentVersionTransitions(rule.NoncurrentVersionTransitions) } if rule.Prefix != nil { @@ -878,7 +878,7 @@ func flattenLifecycleRules(ctx context.Context, rules []types.LifecycleRule) []i } if rule.Transitions != nil { - m["transition"] = flattenLifecycleRuleTransitions(rule.Transitions) + m["transition"] = flattenTransitions(rule.Transitions) } results = append(results, m) @@ -887,7 +887,7 @@ func flattenLifecycleRules(ctx context.Context, rules []types.LifecycleRule) []i return results } -func flattenLifecycleRuleAbortIncompleteMultipartUpload(u *types.AbortIncompleteMultipartUpload) []interface{} { +func flattenAbortIncompleteMultipartUpload(u *types.AbortIncompleteMultipartUpload) []interface{} { if u == nil { return []interface{}{} } @@ -895,13 +895,13 @@ func flattenLifecycleRuleAbortIncompleteMultipartUpload(u *types.AbortIncomplete m := make(map[string]interface{}) if u.DaysAfterInitiation != nil { - m["days_after_initiation"] = 
int(aws.ToInt32(u.DaysAfterInitiation)) + m["days_after_initiation"] = aws.ToInt32(u.DaysAfterInitiation) } return []interface{}{m} } -func flattenLifecycleRuleExpiration(expiration *types.LifecycleExpiration) []interface{} { +func flattenLifecycleExpiration(expiration *types.LifecycleExpiration) []interface{} { if expiration == nil { return []interface{}{} } @@ -913,7 +913,7 @@ func flattenLifecycleRuleExpiration(expiration *types.LifecycleExpiration) []int } if expiration.Days != nil { - m["days"] = int(aws.ToInt32(expiration.Days)) + m["days"] = aws.ToInt32(expiration.Days) } if expiration.ExpiredObjectDeleteMarker != nil { @@ -987,7 +987,7 @@ func flattenLifecycleRuleFilterMemberTag(op *types.LifecycleRuleFilterMemberTag) return []interface{}{m} } -func flattenLifecycleRuleNoncurrentVersionExpiration(expiration *types.NoncurrentVersionExpiration) []interface{} { +func flattenNoncurrentVersionExpiration(expiration *types.NoncurrentVersionExpiration) []interface{} { if expiration == nil { return []interface{}{} } @@ -999,13 +999,13 @@ func flattenLifecycleRuleNoncurrentVersionExpiration(expiration *types.Noncurren } if expiration.NoncurrentDays != nil { - m["noncurrent_days"] = int(aws.ToInt32(expiration.NoncurrentDays)) + m["noncurrent_days"] = aws.ToInt32(expiration.NoncurrentDays) } return []interface{}{m} } -func flattenLifecycleRuleNoncurrentVersionTransitions(transitions []types.NoncurrentVersionTransition) []interface{} { +func flattenNoncurrentVersionTransitions(transitions []types.NoncurrentVersionTransition) []interface{} { if len(transitions) == 0 { return []interface{}{} } @@ -1022,7 +1022,7 @@ func flattenLifecycleRuleNoncurrentVersionTransitions(transitions []types.Noncur } if transition.NoncurrentDays != nil { - m["noncurrent_days"] = int(aws.ToInt32(transition.NoncurrentDays)) + m["noncurrent_days"] = aws.ToInt32(transition.NoncurrentDays) } results = append(results, m) @@ -1031,7 +1031,7 @@ func 
flattenLifecycleRuleNoncurrentVersionTransitions(transitions []types.Noncur return results } -func flattenLifecycleRuleTransitions(transitions []types.Transition) []interface{} { +func flattenTransitions(transitions []types.Transition) []interface{} { if len(transitions) == 0 { return []interface{}{} } @@ -1040,7 +1040,6 @@ func flattenLifecycleRuleTransitions(transitions []types.Transition) []interface for _, transition := range transitions { m := map[string]interface{}{ - "days": transition.Days, "storage_class": transition.StorageClass, } @@ -1048,6 +1047,10 @@ func flattenLifecycleRuleTransitions(transitions []types.Transition) []interface m["date"] = transition.Date.Format(time.RFC3339) } + if transition.Days != nil { + m["days"] = aws.ToInt32(transition.Days) + } + results = append(results, m) } diff --git a/internal/service/s3/bucket_logging.go b/internal/service/s3/bucket_logging.go index 677ef9b8d03..52ebd5e356e 100644 --- a/internal/service/s3/bucket_logging.go +++ b/internal/service/s3/bucket_logging.go @@ -155,7 +155,7 @@ func resourceBucketLoggingCreate(ctx context.Context, d *schema.ResourceData, me } if v, ok := d.GetOk("target_grant"); ok && v.(*schema.Set).Len() > 0 { - input.BucketLoggingStatus.LoggingEnabled.TargetGrants = expandBucketLoggingTargetGrants(v.(*schema.Set).List()) + input.BucketLoggingStatus.LoggingEnabled.TargetGrants = expandTargetGrants(v.(*schema.Set).List()) } if v, ok := d.GetOk("target_object_key_format"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { @@ -211,7 +211,7 @@ func resourceBucketLoggingRead(ctx context.Context, d *schema.ResourceData, meta d.Set("bucket", bucket) d.Set("expected_bucket_owner", expectedBucketOwner) d.Set("target_bucket", loggingEnabled.TargetBucket) - if err := d.Set("target_grant", flattenBucketLoggingTargetGrants(loggingEnabled.TargetGrants)); err != nil { + if err := d.Set("target_grant", flattenTargetGrants(loggingEnabled.TargetGrants)); err != nil { return 
sdkdiag.AppendErrorf(diags, "setting target_grant: %s", err) } if loggingEnabled.TargetObjectKeyFormat != nil { @@ -249,7 +249,7 @@ func resourceBucketLoggingUpdate(ctx context.Context, d *schema.ResourceData, me } if v, ok := d.GetOk("target_grant"); ok && v.(*schema.Set).Len() > 0 { - input.BucketLoggingStatus.LoggingEnabled.TargetGrants = expandBucketLoggingTargetGrants(v.(*schema.Set).List()) + input.BucketLoggingStatus.LoggingEnabled.TargetGrants = expandTargetGrants(v.(*schema.Set).List()) } if v, ok := d.GetOk("target_object_key_format"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { @@ -325,7 +325,7 @@ func findLoggingEnabled(ctx context.Context, conn *s3.Client, bucketName, expect return output.LoggingEnabled, nil } -func expandBucketLoggingTargetGrants(l []interface{}) []types.TargetGrant { +func expandTargetGrants(l []interface{}) []types.TargetGrant { var grants []types.TargetGrant for _, tfMapRaw := range l { @@ -337,7 +337,7 @@ func expandBucketLoggingTargetGrants(l []interface{}) []types.TargetGrant { grant := types.TargetGrant{} if v, ok := tfMap["grantee"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - grant.Grantee = expandBucketLoggingTargetGrantGrantee(v) + grant.Grantee = expandLoggingGrantee(v) } if v, ok := tfMap["permission"].(string); ok && v != "" { @@ -350,7 +350,7 @@ func expandBucketLoggingTargetGrants(l []interface{}) []types.TargetGrant { return grants } -func expandBucketLoggingTargetGrantGrantee(l []interface{}) *types.Grantee { +func expandLoggingGrantee(l []interface{}) *types.Grantee { if len(l) == 0 || l[0] == nil { return nil } @@ -385,7 +385,7 @@ func expandBucketLoggingTargetGrantGrantee(l []interface{}) *types.Grantee { return grantee } -func flattenBucketLoggingTargetGrants(grants []types.TargetGrant) []interface{} { +func flattenTargetGrants(grants []types.TargetGrant) []interface{} { var results []interface{} for _, grant := range grants { @@ -394,7 +394,7 @@ func 
flattenBucketLoggingTargetGrants(grants []types.TargetGrant) []interface{} } if grant.Grantee != nil { - m["grantee"] = flattenBucketLoggingTargetGrantGrantee(grant.Grantee) + m["grantee"] = flattenLoggingGrantee(grant.Grantee) } results = append(results, m) @@ -403,7 +403,7 @@ func flattenBucketLoggingTargetGrants(grants []types.TargetGrant) []interface{} return results } -func flattenBucketLoggingTargetGrantGrantee(g *types.Grantee) []interface{} { +func flattenLoggingGrantee(g *types.Grantee) []interface{} { if g == nil { return []interface{}{} } diff --git a/internal/service/s3/bucket_replication_configuration.go b/internal/service/s3/bucket_replication_configuration.go index be71164bfb7..46e25fe6924 100644 --- a/internal/service/s3/bucket_replication_configuration.go +++ b/internal/service/s3/bucket_replication_configuration.go @@ -482,15 +482,15 @@ func expandReplicationRules(ctx context.Context, l []interface{}) []types.Replic rule := types.ReplicationRule{} if v, ok := tfMap["delete_marker_replication"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rule.DeleteMarkerReplication = expandReplicationRuleDeleteMarkerReplication(v) + rule.DeleteMarkerReplication = expandDeleteMarkerReplication(v) } if v, ok := tfMap["destination"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rule.Destination = expandReplicationRuleDestination(v) + rule.Destination = expandDestination(v) } if v, ok := tfMap["existing_object_replication"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rule.ExistingObjectReplication = expandReplicationRuleExistingObjectReplication(v) + rule.ExistingObjectReplication = expandExistingObjectReplication(v) } if v, ok := tfMap["id"].(string); ok && v != "" { @@ -498,7 +498,7 @@ func expandReplicationRules(ctx context.Context, l []interface{}) []types.Replic } if v, ok := tfMap["source_selection_criteria"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rule.SourceSelectionCriteria = 
expandReplicationRuleSourceSelectionCriteria(v) + rule.SourceSelectionCriteria = expandSourceSelectionCriteria(v) } if v, ok := tfMap["status"].(string); ok && v != "" { @@ -523,7 +523,7 @@ func expandReplicationRules(ctx context.Context, l []interface{}) []types.Replic return rules } -func expandReplicationRuleDeleteMarkerReplication(l []interface{}) *types.DeleteMarkerReplication { +func expandDeleteMarkerReplication(l []interface{}) *types.DeleteMarkerReplication { if len(l) == 0 || l[0] == nil { return nil } @@ -543,7 +543,7 @@ func expandReplicationRuleDeleteMarkerReplication(l []interface{}) *types.Delete return result } -func expandReplicationRuleDestination(l []interface{}) *types.Destination { +func expandDestination(l []interface{}) *types.Destination { if len(l) == 0 || l[0] == nil { return nil } @@ -557,7 +557,7 @@ func expandReplicationRuleDestination(l []interface{}) *types.Destination { result := &types.Destination{} if v, ok := tfMap["access_control_translation"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.AccessControlTranslation = expandReplicationRuleDestinationAccessControlTranslation(v) + result.AccessControlTranslation = expandAccessControlTranslation(v) } if v, ok := tfMap["account"].(string); ok && v != "" { @@ -569,15 +569,15 @@ func expandReplicationRuleDestination(l []interface{}) *types.Destination { } if v, ok := tfMap["encryption_configuration"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.EncryptionConfiguration = expandReplicationRuleDestinationEncryptionConfiguration(v) + result.EncryptionConfiguration = expandEncryptionConfiguration(v) } if v, ok := tfMap["metrics"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.Metrics = expandReplicationRuleDestinationMetrics(v) + result.Metrics = expandMetrics(v) } if v, ok := tfMap["replication_time"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.ReplicationTime = expandReplicationRuleDestinationReplicationTime(v) + 
result.ReplicationTime = expandReplicationTime(v) } if v, ok := tfMap["storage_class"].(string); ok && v != "" { @@ -587,7 +587,7 @@ func expandReplicationRuleDestination(l []interface{}) *types.Destination { return result } -func expandReplicationRuleDestinationAccessControlTranslation(l []interface{}) *types.AccessControlTranslation { +func expandAccessControlTranslation(l []interface{}) *types.AccessControlTranslation { if len(l) == 0 || l[0] == nil { return nil } @@ -607,7 +607,7 @@ func expandReplicationRuleDestinationAccessControlTranslation(l []interface{}) * return result } -func expandReplicationRuleDestinationEncryptionConfiguration(l []interface{}) *types.EncryptionConfiguration { +func expandEncryptionConfiguration(l []interface{}) *types.EncryptionConfiguration { if len(l) == 0 || l[0] == nil { return nil } @@ -627,7 +627,7 @@ func expandReplicationRuleDestinationEncryptionConfiguration(l []interface{}) *t return result } -func expandReplicationRuleDestinationMetrics(l []interface{}) *types.Metrics { +func expandMetrics(l []interface{}) *types.Metrics { if len(l) == 0 || l[0] == nil { return nil } @@ -641,7 +641,7 @@ func expandReplicationRuleDestinationMetrics(l []interface{}) *types.Metrics { result := &types.Metrics{} if v, ok := tfMap["event_threshold"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.EventThreshold = expandReplicationRuleDestinationReplicationTimeValue(v) + result.EventThreshold = expandReplicationTimeValue(v) } if v, ok := tfMap["status"].(string); ok && v != "" { @@ -651,7 +651,7 @@ func expandReplicationRuleDestinationMetrics(l []interface{}) *types.Metrics { return result } -func expandReplicationRuleDestinationReplicationTime(l []interface{}) *types.ReplicationTime { +func expandReplicationTime(l []interface{}) *types.ReplicationTime { if len(l) == 0 || l[0] == nil { return nil } @@ -669,13 +669,13 @@ func expandReplicationRuleDestinationReplicationTime(l []interface{}) *types.Rep } if v, ok := 
tfMap["time"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.Time = expandReplicationRuleDestinationReplicationTimeValue(v) + result.Time = expandReplicationTimeValue(v) } return result } -func expandReplicationRuleDestinationReplicationTimeValue(l []interface{}) *types.ReplicationTimeValue { +func expandReplicationTimeValue(l []interface{}) *types.ReplicationTimeValue { if len(l) == 0 || l[0] == nil { return nil } @@ -695,7 +695,7 @@ func expandReplicationRuleDestinationReplicationTimeValue(l []interface{}) *type return result } -func expandReplicationRuleExistingObjectReplication(l []interface{}) *types.ExistingObjectReplication { +func expandExistingObjectReplication(l []interface{}) *types.ExistingObjectReplication { if len(l) == 0 || l[0] == nil { return nil } @@ -715,7 +715,7 @@ func expandReplicationRuleExistingObjectReplication(l []interface{}) *types.Exis return result } -func expandReplicationRuleSourceSelectionCriteria(l []interface{}) *types.SourceSelectionCriteria { +func expandSourceSelectionCriteria(l []interface{}) *types.SourceSelectionCriteria { if len(l) == 0 || l[0] == nil { return nil } @@ -728,17 +728,17 @@ func expandReplicationRuleSourceSelectionCriteria(l []interface{}) *types.Source result := &types.SourceSelectionCriteria{} if v, ok := tfMap["replica_modifications"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.ReplicaModifications = expandSourceSelectionCriteriaReplicaModifications(v) + result.ReplicaModifications = expandReplicaModifications(v) } if v, ok := tfMap["sse_kms_encrypted_objects"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - result.SseKmsEncryptedObjects = expandSourceSelectionCriteriaSSEKMSEncryptedObjects(v) + result.SseKmsEncryptedObjects = expandSSEKMSEncryptedObjects(v) } return result } -func expandSourceSelectionCriteriaReplicaModifications(l []interface{}) *types.ReplicaModifications { +func expandReplicaModifications(l []interface{}) *types.ReplicaModifications { if len(l) == 0 || 
l[0] == nil { return nil } @@ -758,7 +758,7 @@ func expandSourceSelectionCriteriaReplicaModifications(l []interface{}) *types.R return result } -func expandSourceSelectionCriteriaSSEKMSEncryptedObjects(l []interface{}) *types.SseKmsEncryptedObjects { +func expandSSEKMSEncryptedObjects(l []interface{}) *types.SseKmsEncryptedObjects { if len(l) == 0 || l[0] == nil { return nil } @@ -872,20 +872,19 @@ func flattenReplicationRules(ctx context.Context, rules []types.ReplicationRule) for _, rule := range rules { m := map[string]interface{}{ - "priority": rule.Priority, - "status": rule.Status, + "status": rule.Status, } if rule.DeleteMarkerReplication != nil { - m["delete_marker_replication"] = flattenReplicationRuleDeleteMarkerReplication(rule.DeleteMarkerReplication) + m["delete_marker_replication"] = flattenDeleteMarkerReplication(rule.DeleteMarkerReplication) } if rule.Destination != nil { - m["destination"] = flattenReplicationRuleDestination(rule.Destination) + m["destination"] = flattenDestination(rule.Destination) } if rule.ExistingObjectReplication != nil { - m["existing_object_replication"] = flattenReplicationRuleExistingObjectReplication(rule.ExistingObjectReplication) + m["existing_object_replication"] = flattenExistingObjectReplication(rule.ExistingObjectReplication) } if rule.Filter != nil { @@ -896,12 +895,16 @@ func flattenReplicationRules(ctx context.Context, rules []types.ReplicationRule) m["id"] = aws.ToString(rule.ID) } + if rule.Priority != nil { + m["priority"] = aws.ToInt32(rule.Priority) + } + if rule.Prefix != nil { m["prefix"] = aws.ToString(rule.Prefix) } if rule.SourceSelectionCriteria != nil { - m["source_selection_criteria"] = flattenReplicationRuleSourceSelectionCriteria(rule.SourceSelectionCriteria) + m["source_selection_criteria"] = flattenSourceSelectionCriteria(rule.SourceSelectionCriteria) } results = append(results, m) @@ -910,7 +913,7 @@ func flattenReplicationRules(ctx context.Context, rules []types.ReplicationRule) return results 
} -func flattenReplicationRuleDeleteMarkerReplication(dmr *types.DeleteMarkerReplication) []interface{} { +func flattenDeleteMarkerReplication(dmr *types.DeleteMarkerReplication) []interface{} { if dmr == nil { return []interface{}{} } @@ -922,7 +925,7 @@ func flattenReplicationRuleDeleteMarkerReplication(dmr *types.DeleteMarkerReplic return []interface{}{m} } -func flattenReplicationRuleDestination(dest *types.Destination) []interface{} { +func flattenDestination(dest *types.Destination) []interface{} { if dest == nil { return []interface{}{} } @@ -932,7 +935,7 @@ func flattenReplicationRuleDestination(dest *types.Destination) []interface{} { } if dest.AccessControlTranslation != nil { - m["access_control_translation"] = flattenReplicationRuleDestinationAccessControlTranslation(dest.AccessControlTranslation) + m["access_control_translation"] = flattenAccessControlTranslation(dest.AccessControlTranslation) } if dest.Account != nil { @@ -944,21 +947,21 @@ func flattenReplicationRuleDestination(dest *types.Destination) []interface{} { } if dest.EncryptionConfiguration != nil { - m["encryption_configuration"] = flattenReplicationRuleDestinationEncryptionConfiguration(dest.EncryptionConfiguration) + m["encryption_configuration"] = flattenEncryptionConfiguration(dest.EncryptionConfiguration) } if dest.Metrics != nil { - m["metrics"] = flattenReplicationRuleDestinationMetrics(dest.Metrics) + m["metrics"] = flattenMetrics(dest.Metrics) } if dest.ReplicationTime != nil { - m["replication_time"] = flattenReplicationRuleDestinationReplicationTime(dest.ReplicationTime) + m["replication_time"] = flattenReplicationReplicationTime(dest.ReplicationTime) } return []interface{}{m} } -func flattenReplicationRuleDestinationAccessControlTranslation(act *types.AccessControlTranslation) []interface{} { +func flattenAccessControlTranslation(act *types.AccessControlTranslation) []interface{} { if act == nil { return []interface{}{} } @@ -970,7 +973,7 @@ func 
flattenReplicationRuleDestinationAccessControlTranslation(act *types.Access return []interface{}{m} } -func flattenReplicationRuleDestinationEncryptionConfiguration(ec *types.EncryptionConfiguration) []interface{} { +func flattenEncryptionConfiguration(ec *types.EncryptionConfiguration) []interface{} { if ec == nil { return []interface{}{} } @@ -984,7 +987,7 @@ func flattenReplicationRuleDestinationEncryptionConfiguration(ec *types.Encrypti return []interface{}{m} } -func flattenReplicationRuleDestinationMetrics(metrics *types.Metrics) []interface{} { +func flattenMetrics(metrics *types.Metrics) []interface{} { if metrics == nil { return []interface{}{} } @@ -994,13 +997,13 @@ func flattenReplicationRuleDestinationMetrics(metrics *types.Metrics) []interfac } if metrics.EventThreshold != nil { - m["event_threshold"] = flattenReplicationRuleDestinationReplicationTimeValue(metrics.EventThreshold) + m["event_threshold"] = flattenReplicationTimeValue(metrics.EventThreshold) } return []interface{}{m} } -func flattenReplicationRuleDestinationReplicationTimeValue(rtv *types.ReplicationTimeValue) []interface{} { +func flattenReplicationTimeValue(rtv *types.ReplicationTimeValue) []interface{} { if rtv == nil { return []interface{}{} } @@ -1012,7 +1015,7 @@ func flattenReplicationRuleDestinationReplicationTimeValue(rtv *types.Replicatio return []interface{}{m} } -func flattenReplicationRuleDestinationReplicationTime(rt *types.ReplicationTime) []interface{} { +func flattenReplicationReplicationTime(rt *types.ReplicationTime) []interface{} { if rt == nil { return []interface{}{} } @@ -1022,13 +1025,13 @@ func flattenReplicationRuleDestinationReplicationTime(rt *types.ReplicationTime) } if rt.Time != nil { - m["time"] = flattenReplicationRuleDestinationReplicationTimeValue(rt.Time) + m["time"] = flattenReplicationTimeValue(rt.Time) } return []interface{}{m} } -func flattenReplicationRuleExistingObjectReplication(eor *types.ExistingObjectReplication) []interface{} { +func 
flattenExistingObjectReplication(eor *types.ExistingObjectReplication) []interface{} { if eor == nil { return []interface{}{} } @@ -1097,7 +1100,7 @@ func flattenReplicationRuleFilterMemberTag(op *types.ReplicationRuleFilterMember return []interface{}{m} } -func flattenReplicationRuleSourceSelectionCriteria(ssc *types.SourceSelectionCriteria) []interface{} { +func flattenSourceSelectionCriteria(ssc *types.SourceSelectionCriteria) []interface{} { if ssc == nil { return []interface{}{} } @@ -1105,17 +1108,17 @@ func flattenReplicationRuleSourceSelectionCriteria(ssc *types.SourceSelectionCri m := make(map[string]interface{}) if ssc.ReplicaModifications != nil { - m["replica_modifications"] = flattenSourceSelectionCriteriaReplicaModifications(ssc.ReplicaModifications) + m["replica_modifications"] = flattenReplicaModifications(ssc.ReplicaModifications) } if ssc.SseKmsEncryptedObjects != nil { - m["sse_kms_encrypted_objects"] = flattenSourceSelectionCriteriaSSEKMSEncryptedObjects(ssc.SseKmsEncryptedObjects) + m["sse_kms_encrypted_objects"] = flattenSSEKMSEncryptedObjects(ssc.SseKmsEncryptedObjects) } return []interface{}{m} } -func flattenSourceSelectionCriteriaReplicaModifications(rc *types.ReplicaModifications) []interface{} { +func flattenReplicaModifications(rc *types.ReplicaModifications) []interface{} { if rc == nil { return []interface{}{} } @@ -1127,7 +1130,7 @@ func flattenSourceSelectionCriteriaReplicaModifications(rc *types.ReplicaModific return []interface{}{m} } -func flattenSourceSelectionCriteriaSSEKMSEncryptedObjects(objects *types.SseKmsEncryptedObjects) []interface{} { +func flattenSSEKMSEncryptedObjects(objects *types.SseKmsEncryptedObjects) []interface{} { if objects == nil { return []interface{}{} } diff --git a/internal/service/s3/bucket_server_side_encryption_configuration.go b/internal/service/s3/bucket_server_side_encryption_configuration.go index e6aef383959..39967ae1857 100644 --- 
a/internal/service/s3/bucket_server_side_encryption_configuration.go +++ b/internal/service/s3/bucket_server_side_encryption_configuration.go @@ -288,14 +288,16 @@ func flattenServerSideEncryptionRules(rules []types.ServerSideEncryptionRule) [] var results []interface{} for _, rule := range rules { - m := map[string]interface{}{ - "bucket_key_enabled": rule.BucketKeyEnabled, - } + m := make(map[string]interface{}) if rule.ApplyServerSideEncryptionByDefault != nil { m["apply_server_side_encryption_by_default"] = flattenServerSideEncryptionByDefault(rule.ApplyServerSideEncryptionByDefault) } + if rule.BucketKeyEnabled != nil { + m["bucket_key_enabled"] = aws.ToBool(rule.BucketKeyEnabled) + } + results = append(results, m) } diff --git a/internal/service/s3/bucket_versioning.go b/internal/service/s3/bucket_versioning.go index bb289823521..211f5304e7a 100644 --- a/internal/service/s3/bucket_versioning.go +++ b/internal/service/s3/bucket_versioning.go @@ -167,7 +167,7 @@ func resourceBucketVersioningRead(ctx context.Context, d *schema.ResourceData, m d.Set("bucket", bucket) d.Set("expected_bucket_owner", expectedBucketOwner) - if err := d.Set("versioning_configuration", flattenBucketVersioningConfiguration(output)); err != nil { + if err := d.Set("versioning_configuration", flattenVersioning(output)); err != nil { return diag.Errorf("setting versioning_configuration: %s", err) } @@ -271,7 +271,7 @@ func expandBucketVersioningConfiguration(l []interface{}) *types.VersioningConfi return result } -func flattenBucketVersioningConfiguration(config *s3.GetBucketVersioningOutput) []interface{} { +func flattenVersioning(config *s3.GetBucketVersioningOutput) []interface{} { if config == nil { return []interface{}{} } diff --git a/internal/service/s3/bucket_website_configuration.go b/internal/service/s3/bucket_website_configuration.go index 6f112f6e237..1b0bc70c56a 100644 --- a/internal/service/s3/bucket_website_configuration.go +++ 
b/internal/service/s3/bucket_website_configuration.go @@ -184,19 +184,19 @@ func resourceBucketWebsiteConfigurationCreate(ctx context.Context, d *schema.Res websiteConfig := &types.WebsiteConfiguration{} if v, ok := d.GetOk("error_document"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - websiteConfig.ErrorDocument = expandBucketWebsiteConfigurationErrorDocument(v.([]interface{})) + websiteConfig.ErrorDocument = expandErrorDocument(v.([]interface{})) } if v, ok := d.GetOk("index_document"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - websiteConfig.IndexDocument = expandBucketWebsiteConfigurationIndexDocument(v.([]interface{})) + websiteConfig.IndexDocument = expandIndexDocument(v.([]interface{})) } if v, ok := d.GetOk("redirect_all_requests_to"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - websiteConfig.RedirectAllRequestsTo = expandBucketWebsiteConfigurationRedirectAllRequestsTo(v.([]interface{})) + websiteConfig.RedirectAllRequestsTo = expandRedirectAllRequestsTo(v.([]interface{})) } if v, ok := d.GetOk("routing_rule"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - websiteConfig.RoutingRules = expandBucketWebsiteConfigurationRoutingRules(v.([]interface{})) + websiteConfig.RoutingRules = expandRoutingRules(v.([]interface{})) } if v, ok := d.GetOk("routing_rules"); ok { @@ -263,21 +263,21 @@ func resourceBucketWebsiteConfigurationRead(ctx context.Context, d *schema.Resou } d.Set("bucket", bucket) - if err := d.Set("error_document", flattenBucketWebsiteConfigurationErrorDocument(output.ErrorDocument)); err != nil { + if err := d.Set("error_document", flattenErrorDocument(output.ErrorDocument)); err != nil { return diag.Errorf("setting error_document: %s", err) } d.Set("expected_bucket_owner", expectedBucketOwner) - if err := d.Set("index_document", flattenBucketWebsiteConfigurationIndexDocument(output.IndexDocument)); err != nil { + if err := d.Set("index_document", 
flattenIndexDocument(output.IndexDocument)); err != nil { return diag.Errorf("setting index_document: %s", err) } - if err := d.Set("redirect_all_requests_to", flattenBucketWebsiteConfigurationRedirectAllRequestsTo(output.RedirectAllRequestsTo)); err != nil { + if err := d.Set("redirect_all_requests_to", flattenRedirectAllRequestsTo(output.RedirectAllRequestsTo)); err != nil { return diag.Errorf("setting redirect_all_requests_to: %s", err) } - if err := d.Set("routing_rule", flattenBucketWebsiteConfigurationRoutingRules(output.RoutingRules)); err != nil { + if err := d.Set("routing_rule", flattenRoutingRules(output.RoutingRules)); err != nil { return diag.Errorf("setting routing_rule: %s", err) } if output.RoutingRules != nil { - rr, err := normalizeRoutingRulesV2(output.RoutingRules) + rr, err := normalizeRoutingRules(output.RoutingRules) if err != nil { return diag.FromErr(err) } @@ -308,20 +308,20 @@ func resourceBucketWebsiteConfigurationUpdate(ctx context.Context, d *schema.Res websiteConfig := &types.WebsiteConfiguration{} if v, ok := d.GetOk("error_document"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - websiteConfig.ErrorDocument = expandBucketWebsiteConfigurationErrorDocument(v.([]interface{})) + websiteConfig.ErrorDocument = expandErrorDocument(v.([]interface{})) } if v, ok := d.GetOk("index_document"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - websiteConfig.IndexDocument = expandBucketWebsiteConfigurationIndexDocument(v.([]interface{})) + websiteConfig.IndexDocument = expandIndexDocument(v.([]interface{})) } if v, ok := d.GetOk("redirect_all_requests_to"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - websiteConfig.RedirectAllRequestsTo = expandBucketWebsiteConfigurationRedirectAllRequestsTo(v.([]interface{})) + websiteConfig.RedirectAllRequestsTo = expandRedirectAllRequestsTo(v.([]interface{})) } if d.HasChanges("routing_rule", "routing_rules") { if d.HasChange("routing_rule") { - 
websiteConfig.RoutingRules = expandBucketWebsiteConfigurationRoutingRules(d.Get("routing_rule").([]interface{})) + websiteConfig.RoutingRules = expandRoutingRules(d.Get("routing_rule").([]interface{})) } else { var unmarshalledRules []types.RoutingRule if err := json.Unmarshal([]byte(d.Get("routing_rules").(string)), &unmarshalledRules); err != nil { @@ -332,7 +332,7 @@ func resourceBucketWebsiteConfigurationUpdate(ctx context.Context, d *schema.Res } else { // Still send the current RoutingRules configuration if v, ok := d.GetOk("routing_rule"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil { - websiteConfig.RoutingRules = expandBucketWebsiteConfigurationRoutingRules(v.([]interface{})) + websiteConfig.RoutingRules = expandRoutingRules(v.([]interface{})) } if v, ok := d.GetOk("routing_rules"); ok { @@ -397,7 +397,63 @@ func resourceBucketWebsiteConfigurationDelete(ctx context.Context, d *schema.Res return nil } -func expandBucketWebsiteConfigurationErrorDocument(l []interface{}) *types.ErrorDocument { +func findBucketWebsite(ctx context.Context, conn *s3.Client, bucket, expectedBucketOwner string) (*s3.GetBucketWebsiteOutput, error) { + input := &s3.GetBucketWebsiteInput{ + Bucket: aws.String(bucket), + } + if expectedBucketOwner != "" { + input.ExpectedBucketOwner = aws.String(expectedBucketOwner) + } + + output, err := conn.GetBucketWebsite(ctx, input) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeNoSuchWebsiteConfiguration) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func findBucketLocation(ctx context.Context, conn *s3.Client, bucket, expectedBucketOwner string) (*s3.GetBucketLocationOutput, error) { + input := &s3.GetBucketLocationInput{ + Bucket: aws.String(bucket), + } + if expectedBucketOwner != "" { + input.ExpectedBucketOwner = 
aws.String(expectedBucketOwner) + } + + output, err := conn.GetBucketLocation(ctx, input) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + +func expandErrorDocument(l []interface{}) *types.ErrorDocument { if len(l) == 0 || l[0] == nil { return nil } @@ -416,7 +472,7 @@ func expandBucketWebsiteConfigurationErrorDocument(l []interface{}) *types.Error return result } -func expandBucketWebsiteConfigurationIndexDocument(l []interface{}) *types.IndexDocument { +func expandIndexDocument(l []interface{}) *types.IndexDocument { if len(l) == 0 || l[0] == nil { return nil } @@ -435,7 +491,7 @@ func expandBucketWebsiteConfigurationIndexDocument(l []interface{}) *types.Index return result } -func expandBucketWebsiteConfigurationRedirectAllRequestsTo(l []interface{}) *types.RedirectAllRequestsTo { +func expandRedirectAllRequestsTo(l []interface{}) *types.RedirectAllRequestsTo { if len(l) == 0 || l[0] == nil { return nil } @@ -458,7 +514,7 @@ func expandBucketWebsiteConfigurationRedirectAllRequestsTo(l []interface{}) *typ return result } -func expandBucketWebsiteConfigurationRoutingRules(l []interface{}) []types.RoutingRule { +func expandRoutingRules(l []interface{}) []types.RoutingRule { var results []types.RoutingRule for _, tfMapRaw := range l { @@ -470,11 +526,11 @@ func expandBucketWebsiteConfigurationRoutingRules(l []interface{}) []types.Routi rule := types.RoutingRule{} if v, ok := tfMap["condition"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rule.Condition = expandBucketWebsiteConfigurationRoutingRuleCondition(v) + rule.Condition = expandCondition(v) } if v, ok := tfMap["redirect"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rule.Redirect = expandBucketWebsiteConfigurationRoutingRuleRedirect(v) + rule.Redirect = 
expandRedirect(v) } results = append(results, rule) @@ -483,7 +539,7 @@ func expandBucketWebsiteConfigurationRoutingRules(l []interface{}) []types.Routi return results } -func expandBucketWebsiteConfigurationRoutingRuleCondition(l []interface{}) *types.Condition { +func expandCondition(l []interface{}) *types.Condition { if len(l) == 0 || l[0] == nil { return nil } @@ -506,7 +562,7 @@ func expandBucketWebsiteConfigurationRoutingRuleCondition(l []interface{}) *type return result } -func expandBucketWebsiteConfigurationRoutingRuleRedirect(l []interface{}) *types.Redirect { +func expandRedirect(l []interface{}) *types.Redirect { if len(l) == 0 || l[0] == nil { return nil } @@ -541,7 +597,7 @@ func expandBucketWebsiteConfigurationRoutingRuleRedirect(l []interface{}) *types return result } -func flattenBucketWebsiteConfigurationIndexDocument(i *types.IndexDocument) []interface{} { +func flattenIndexDocument(i *types.IndexDocument) []interface{} { if i == nil { return []interface{}{} } @@ -555,7 +611,7 @@ func flattenBucketWebsiteConfigurationIndexDocument(i *types.IndexDocument) []in return []interface{}{m} } -func flattenBucketWebsiteConfigurationErrorDocument(e *types.ErrorDocument) []interface{} { +func flattenErrorDocument(e *types.ErrorDocument) []interface{} { if e == nil { return []interface{}{} } @@ -569,7 +625,7 @@ func flattenBucketWebsiteConfigurationErrorDocument(e *types.ErrorDocument) []in return []interface{}{m} } -func flattenBucketWebsiteConfigurationRedirectAllRequestsTo(r *types.RedirectAllRequestsTo) []interface{} { +func flattenRedirectAllRequestsTo(r *types.RedirectAllRequestsTo) []interface{} { if r == nil { return []interface{}{} } @@ -585,18 +641,18 @@ func flattenBucketWebsiteConfigurationRedirectAllRequestsTo(r *types.RedirectAll return []interface{}{m} } -func flattenBucketWebsiteConfigurationRoutingRules(rules []types.RoutingRule) []interface{} { +func flattenRoutingRules(rules []types.RoutingRule) []interface{} { var results []interface{} 
for _, rule := range rules { m := make(map[string]interface{}) if rule.Condition != nil { - m["condition"] = flattenBucketWebsiteConfigurationRoutingRuleCondition(rule.Condition) + m["condition"] = flattenCondition(rule.Condition) } if rule.Redirect != nil { - m["redirect"] = flattenBucketWebsiteConfigurationRoutingRuleRedirect(rule.Redirect) + m["redirect"] = flattenRedirect(rule.Redirect) } results = append(results, m) @@ -605,7 +661,7 @@ func flattenBucketWebsiteConfigurationRoutingRules(rules []types.RoutingRule) [] return results } -func flattenBucketWebsiteConfigurationRoutingRuleCondition(c *types.Condition) []interface{} { +func flattenCondition(c *types.Condition) []interface{} { if c == nil { return []interface{}{} } @@ -623,7 +679,7 @@ func flattenBucketWebsiteConfigurationRoutingRuleCondition(c *types.Condition) [ return []interface{}{m} } -func flattenBucketWebsiteConfigurationRoutingRuleRedirect(r *types.Redirect) []interface{} { +func flattenRedirect(r *types.Redirect) []interface{} { if r == nil { return []interface{}{} } @@ -651,7 +707,7 @@ func flattenBucketWebsiteConfigurationRoutingRuleRedirect(r *types.Redirect) []i return []interface{}{m} } -func normalizeRoutingRulesV2(w []types.RoutingRule) (string, error) { +func normalizeRoutingRules(w []types.RoutingRule) (string, error) { withNulls, err := json.Marshal(w) if err != nil { return "", err @@ -701,59 +757,3 @@ func removeNilOrEmptyProtocol(data map[string]interface{}) map[string]interface{ return withoutNil } - -func findBucketWebsite(ctx context.Context, conn *s3.Client, bucket, expectedBucketOwner string) (*s3.GetBucketWebsiteOutput, error) { - input := &s3.GetBucketWebsiteInput{ - Bucket: aws.String(bucket), - } - if expectedBucketOwner != "" { - input.ExpectedBucketOwner = aws.String(expectedBucketOwner) - } - - output, err := conn.GetBucketWebsite(ctx, input) - - if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeNoSuchWebsiteConfiguration) { - return nil, &retry.NotFoundError{ 
- LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output, nil -} - -func findBucketLocation(ctx context.Context, conn *s3.Client, bucket, expectedBucketOwner string) (*s3.GetBucketLocationOutput, error) { - input := &s3.GetBucketLocationInput{ - Bucket: aws.String(bucket), - } - if expectedBucketOwner != "" { - input.ExpectedBucketOwner = aws.String(expectedBucketOwner) - } - - output, err := conn.GetBucketLocation(ctx, input) - - if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output, nil -} From 9ff77047e884fb84d15922de33a902a96a6f3be7 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 21 Dec 2023 16:54:21 -0500 Subject: [PATCH 384/438] Add 'names.TestPartitionForRegion'. 
--- names/names.go | 2 ++ names/names_test.go | 47 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+) diff --git a/names/names.go b/names/names.go index b96890b54d3..bcce189ad4f 100644 --- a/names/names.go +++ b/names/names.go @@ -176,6 +176,8 @@ func DNSSuffixForPartition(partition string) string { func PartitionForRegion(region string) string { switch region { + case "": + return "" case CNNorth1RegionID, CNNorthwest1RegionID: return ChinaPartitionID case USISOEast1RegionID, USISOWest1RegionID: diff --git a/names/names_test.go b/names/names_test.go index e6835c16f5f..f9af21d3f32 100644 --- a/names/names_test.go +++ b/names/names_test.go @@ -11,6 +11,53 @@ import ( "testing" ) +func TestPartitionForRegion(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + input string + expected string + }{ + { + name: "empty", + input: "", + expected: "", + }, + { + name: "China", + input: CNNorth1RegionID, + expected: ChinaPartitionID, + }, + { + name: "GovCloud", + input: USGovWest1RegionID, + expected: USGovCloudPartitionID, + }, + { + name: "standard", + input: USWest2RegionID, + expected: StandardPartitionID, + }, + { + name: "default", + input: "custom", + expected: StandardPartitionID, + }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + if got, want := PartitionForRegion(testCase.input), testCase.expected; got != want { + t.Errorf("got: %s, expected: %s", got, want) + } + }) + } +} + func TestReverseDNS(t *testing.T) { t.Parallel() From 6e03e6796d0d5b9ab7d80ff151dbfbe1bb67a8f9 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 21 Dec 2023 16:56:26 -0500 Subject: [PATCH 385/438] Add 'names.TestDNSSuffixForPartition'. 
--- names/names.go | 2 ++ names/names_test.go | 47 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+) diff --git a/names/names.go b/names/names.go index bcce189ad4f..1c3ec6f7bae 100644 --- a/names/names.go +++ b/names/names.go @@ -159,6 +159,8 @@ const ( func DNSSuffixForPartition(partition string) string { switch partition { + case "": + return "" case ChinaPartitionID: return "amazonaws.com.cn" case ISOPartitionID: diff --git a/names/names_test.go b/names/names_test.go index f9af21d3f32..a9133242dbc 100644 --- a/names/names_test.go +++ b/names/names_test.go @@ -11,6 +11,53 @@ import ( "testing" ) +func TestDNSSuffixForPartition(t *testing.T) { + t.Parallel() + + testCases := []struct { + name string + input string + expected string + }{ + { + name: "empty", + input: "", + expected: "", + }, + { + name: "China", + input: ChinaPartitionID, + expected: "amazonaws.com.cn", + }, + { + name: "GovCloud", + input: USGovCloudPartitionID, + expected: "amazonaws.com", + }, + { + name: "standard", + input: StandardPartitionID, + expected: "amazonaws.com", + }, + { + name: "default", + input: "custom", + expected: "amazonaws.com", + }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + if got, want := DNSSuffixForPartition(testCase.input), testCase.expected; got != want { + t.Errorf("got: %s, expected: %s", got, want) + } + }) + } +} + func TestPartitionForRegion(t *testing.T) { t.Parallel() From 9739eba4818e08ddd897abfe2070bdb27dd36a10 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 21 Dec 2023 16:59:47 -0500 Subject: [PATCH 386/438] s3: Remove use of AWS SDK for Go v1. 
--- internal/service/s3/bucket_replication_configuration_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/internal/service/s3/bucket_replication_configuration_test.go b/internal/service/s3/bucket_replication_configuration_test.go index ea865eb2077..6c453428e1a 100644 --- a/internal/service/s3/bucket_replication_configuration_test.go +++ b/internal/service/s3/bucket_replication_configuration_test.go @@ -10,7 +10,6 @@ import ( "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/service/s3/types" - "github.com/aws/aws-sdk-go/service/s3" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" "github.com/hashicorp/terraform-plugin-testing/helper/resource" @@ -1195,7 +1194,7 @@ func TestAccS3BucketReplicationConfiguration_directoryBucket(t *testing.T) { CheckDestroy: acctest.CheckWithProviders(testAccCheckBucketReplicationConfigurationDestroyWithProvider(ctx), &providers), Steps: []resource.TestStep{ { - Config: testAccBucketReplicationConfigurationConfig_directoryBucket(rName, s3.StorageClassStandard), + Config: testAccBucketReplicationConfigurationConfig_directoryBucket(rName, string(types.StorageClassStandard)), ExpectError: regexache.MustCompile(`directory buckets are not supported`), }, }, From 3d911ee5586173c1be7cf888ae283c7d7a61d1e9 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 21 Dec 2023 17:14:24 -0500 Subject: [PATCH 387/438] Remove use of AWS SDK for Go v1 'endpoints.S3UsEast1RegionalEndpoint' type. 
--- internal/conns/awsclient.go | 16 +++++++--------- internal/conns/config.go | 14 ++++++++++++-- internal/provider/provider.go | 7 +------ internal/service/s3/service_package.go | 3 +-- 4 files changed, 21 insertions(+), 19 deletions(-) diff --git a/internal/conns/awsclient.go b/internal/conns/awsclient.go index aa9e51412cf..7ab67154ef6 100644 --- a/internal/conns/awsclient.go +++ b/internal/conns/awsclient.go @@ -44,9 +44,9 @@ type AWSClient struct { lock sync.Mutex logger baselogging.Logger s3ExpressClient *s3_sdkv2.Client - s3UsePathStyle bool // From provider configuration. - s3UsEast1RegionalEndpoint endpoints_sdkv1.S3UsEast1RegionalEndpoint // From provider configuration. - stsRegion string // From provider configuration. + s3UsePathStyle bool // From provider configuration. + s3USEast1RegionalEndpoint string // From provider configuration. + stsRegion string // From provider configuration. } // CredentialsProvider returns the AWS SDK for Go v2 credentials provider. @@ -87,7 +87,7 @@ func (c *AWSClient) S3ExpressClient(ctx context.Context) *s3_sdkv2.Client { if c.s3ExpressClient == nil { if s3Client.Options().Region == names.GlobalRegionID { c.s3ExpressClient = errs.Must(client[*s3_sdkv2.Client](ctx, c, names.S3, map[string]any{ - "s3_us_east_1_regional_endpoint": endpoints_sdkv1.RegionalS3UsEast1Endpoint, + "s3_us_east_1_regional_endpoint": "regional", })) } else { c.s3ExpressClient = s3Client @@ -190,12 +190,10 @@ func (c *AWSClient) apiClientConfig(servicePackageName string) map[string]any { m["s3_use_path_style"] = c.s3UsePathStyle // AWS SDK for Go v2 does not use the AWS_S3_US_EAST_1_REGIONAL_ENDPOINT environment variable during configuration. // For compatibility, read it now. 
- if c.s3UsEast1RegionalEndpoint == endpoints_sdkv1.UnsetS3UsEast1Endpoint { - if v, err := endpoints_sdkv1.GetS3UsEast1RegionalEndpoint(os.Getenv("AWS_S3_US_EAST_1_REGIONAL_ENDPOINT")); err == nil { - c.s3UsEast1RegionalEndpoint = v - } + if c.s3USEast1RegionalEndpoint == "" { + c.s3USEast1RegionalEndpoint = NormalizeS3USEast1RegionalEndpoint(os.Getenv("AWS_S3_US_EAST_1_REGIONAL_ENDPOINT")) } - m["s3_us_east_1_regional_endpoint"] = c.s3UsEast1RegionalEndpoint + m["s3_us_east_1_regional_endpoint"] = c.s3USEast1RegionalEndpoint case names.STS: m["sts_region"] = c.stsRegion } diff --git a/internal/conns/config.go b/internal/conns/config.go index 743cba1d2ef..57a6df216c0 100644 --- a/internal/conns/config.go +++ b/internal/conns/config.go @@ -6,6 +6,7 @@ package conns import ( "context" "fmt" + "strings" aws_sdkv2 "github.com/aws/aws-sdk-go-v2/aws" imds_sdkv2 "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" @@ -45,7 +46,7 @@ type Config struct { Region string RetryMode aws_sdkv2.RetryMode S3UsePathStyle bool - S3UsEast1RegionalEndpoint endpoints_sdkv1.S3UsEast1RegionalEndpoint + S3USEast1RegionalEndpoint string SecretKey string SharedConfigFiles []string SharedCredentialsFiles []string @@ -211,7 +212,7 @@ func (c *Config) ConfigureProvider(ctx context.Context, client *AWSClient) (*AWS client.endpoints = c.Endpoints client.logger = logger client.s3UsePathStyle = c.S3UsePathStyle - client.s3UsEast1RegionalEndpoint = c.S3UsEast1RegionalEndpoint + client.s3USEast1RegionalEndpoint = c.S3USEast1RegionalEndpoint client.stsRegion = c.STSRegion return client, diags @@ -227,3 +228,12 @@ func baseSeverityToSdkSeverity(s basediag.Severity) diag.Severity { return -1 } } + +func NormalizeS3USEast1RegionalEndpoint(v string) string { + switch v := strings.ToLower(v); v { + case "legacy", "regional": + return v + default: + return "" + } +} diff --git a/internal/provider/provider.go b/internal/provider/provider.go index b0523543fcf..670b34912fe 100644 --- 
a/internal/provider/provider.go +++ b/internal/provider/provider.go @@ -14,7 +14,6 @@ import ( "github.com/YakDriver/regexache" "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/feature/ec2/imds" - "github.com/aws/aws-sdk-go/aws/endpoints" awsbase "github.com/hashicorp/aws-sdk-go-base/v2" "github.com/hashicorp/terraform-plugin-log/tflog" "github.com/hashicorp/terraform-plugin-sdk/v2/diag" @@ -500,11 +499,7 @@ func configure(ctx context.Context, provider *schema.Provider, d *schema.Resourc } if v, ok := d.Get("s3_us_east_1_regional_endpoint").(string); ok && v != "" { - endpoint, err := endpoints.GetS3UsEast1RegionalEndpoint(v) - if err != nil { - return nil, sdkdiag.AppendFromErr(diags, err) - } - config.S3UsEast1RegionalEndpoint = endpoint + config.S3USEast1RegionalEndpoint = conns.NormalizeS3USEast1RegionalEndpoint(v) } if v, ok := d.GetOk("allowed_account_ids"); ok && v.(*schema.Set).Len() > 0 { diff --git a/internal/service/s3/service_package.go b/internal/service/s3/service_package.go index a56ba5a7750..43ef06b8ac5 100644 --- a/internal/service/s3/service_package.go +++ b/internal/service/s3/service_package.go @@ -9,7 +9,6 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/s3" - endpoints_sdkv1 "github.com/aws/aws-sdk-go/aws/endpoints" "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/names" @@ -22,7 +21,7 @@ func (p *servicePackage) NewClient(ctx context.Context, config map[string]any) ( return s3.NewFromConfig(cfg, func(o *s3.Options) { if endpoint := config["endpoint"].(string); endpoint != "" { o.BaseEndpoint = aws.String(endpoint) - } else if o.Region == names.USEast1RegionID && config["s3_us_east_1_regional_endpoint"].(endpoints_sdkv1.S3UsEast1RegionalEndpoint) != endpoints_sdkv1.RegionalS3UsEast1Endpoint { + } else if o.Region == 
names.USEast1RegionID && config["s3_us_east_1_regional_endpoint"].(string) != "regional" { // Maintain the AWS SDK for Go v1 default of using the global endpoint in us-east-1. // See https://github.com/hashicorp/terraform-provider-aws/issues/33028. o.Region = names.GlobalRegionID From a419ba9f843a8d372d91ba70313ef8dc0326fc46 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 21 Dec 2023 17:18:09 -0500 Subject: [PATCH 388/438] Fix tfproviderlint 'AWSAT003'. --- internal/service/s3/bucket_test.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index 6dff0e640ce..a784acd3d79 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -2415,23 +2415,23 @@ func TestWebsiteEndpoint(t *testing.T) { }{ { LocationConstraint: "", - Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", names.USEast1RegionID, acctest.PartitionDNSSuffix()), + Expected: fmt.Sprintf("bucket-name.s3-website-%s.amazonaws.com", names.USEast1RegionID), }, { LocationConstraint: names.USEast2RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", names.USEast2RegionID, acctest.PartitionDNSSuffix()), + Expected: fmt.Sprintf("bucket-name.s3-website.%s.amazonaws.com", names.USEast2RegionID), }, { LocationConstraint: names.USGovEast1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", names.USGovEast1RegionID, acctest.PartitionDNSSuffix()), + Expected: fmt.Sprintf("bucket-name.s3-website.%s.amazonaws.com", names.USGovEast1RegionID), }, { - LocationConstraint: "us-iso-east-1", - Expected: fmt.Sprintf("bucket-name.s3-website.%s.c2s.ic.gov", "us-iso-east-1"), + LocationConstraint: names.USISOEast1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website.%s.c2s.ic.gov", names.USISOEast1RegionID), }, { - LocationConstraint: "us-isob-east-1", - Expected: fmt.Sprintf("bucket-name.s3-website.%s.sc2s.sgov.gov", "us-isob-east-1"), + LocationConstraint: 
names.USISOBEast1RegionID, + Expected: fmt.Sprintf("bucket-name.s3-website.%s.sc2s.sgov.gov", names.USISOBEast1RegionID), }, { LocationConstraint: names.CNNorth1RegionID, From f10a484afb465a7809bfc87fba749deed3b98872 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 21 Dec 2023 17:23:12 -0500 Subject: [PATCH 389/438] s3: Cosmetics. --- .../s3/bucket_analytics_configuration.go | 8 +-- internal/service/s3/bucket_notification.go | 56 +++++++++---------- .../service/s3/bucket_ownership_controls.go | 50 ++++++++--------- 3 files changed, 57 insertions(+), 57 deletions(-) diff --git a/internal/service/s3/bucket_analytics_configuration.go b/internal/service/s3/bucket_analytics_configuration.go index 7c79e50d43f..1733e349ca9 100644 --- a/internal/service/s3/bucket_analytics_configuration.go +++ b/internal/service/s3/bucket_analytics_configuration.go @@ -331,12 +331,12 @@ func expandAnalyticsExportDestination(edl []interface{}) *types.AnalyticsExportD if len(edl) != 0 && edl[0] != nil { edm := edl[0].(map[string]interface{}) - result.S3BucketDestination = expandAnalyticsBucketDestination(edm["s3_bucket_destination"].([]interface{})) + result.S3BucketDestination = expandAnalyticsS3BucketDestination(edm["s3_bucket_destination"].([]interface{})) } return result } -func expandAnalyticsBucketDestination(bdl []interface{}) *types.AnalyticsS3BucketDestination { +func expandAnalyticsS3BucketDestination(bdl []interface{}) *types.AnalyticsS3BucketDestination { result := &types.AnalyticsS3BucketDestination{} if len(bdl) != 0 && bdl[0] != nil { @@ -407,12 +407,12 @@ func flattenAnalyticsExportDestination(destination *types.AnalyticsExportDestina return []interface{}{ map[string]interface{}{ - "s3_bucket_destination": flattenAnalyticsBucketDestination(destination.S3BucketDestination), + "s3_bucket_destination": flattenAnalyticsS3BucketDestination(destination.S3BucketDestination), }, } } -func flattenAnalyticsBucketDestination(bucketDestination *types.AnalyticsS3BucketDestination) 
[]interface{} { +func flattenAnalyticsS3BucketDestination(bucketDestination *types.AnalyticsS3BucketDestination) []interface{} { if bucketDestination == nil { return nil } diff --git a/internal/service/s3/bucket_notification.go b/internal/service/s3/bucket_notification.go index d08943afe50..1dc624630b4 100644 --- a/internal/service/s3/bucket_notification.go +++ b/internal/service/s3/bucket_notification.go @@ -383,6 +383,34 @@ func resourceBucketNotificationDelete(ctx context.Context, d *schema.ResourceDat return diags } +func findBucketNotificationConfiguration(ctx context.Context, conn *s3.Client, bucket, expectedBucketOwner string) (*s3.GetBucketNotificationConfigurationOutput, error) { + input := &s3.GetBucketNotificationConfigurationInput{ + Bucket: aws.String(bucket), + } + if expectedBucketOwner != "" { + input.ExpectedBucketOwner = aws.String(expectedBucketOwner) + } + + output, err := conn.GetBucketNotificationConfiguration(ctx, input) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output, nil +} + func flattenNotificationConfigurationFilter(filter *types.NotificationConfigurationFilter) map[string]interface{} { filterRules := map[string]interface{}{} if filter.Key == nil || filter.Key.FilterRules == nil { @@ -456,31 +484,3 @@ func flattenLambdaFunctionConfigurations(configs []types.LambdaFunctionConfigura return lambdaFunctionNotifications } - -func findBucketNotificationConfiguration(ctx context.Context, conn *s3.Client, bucket, expectedBucketOwner string) (*s3.GetBucketNotificationConfigurationOutput, error) { - input := &s3.GetBucketNotificationConfigurationInput{ - Bucket: aws.String(bucket), - } - if expectedBucketOwner != "" { - input.ExpectedBucketOwner = aws.String(expectedBucketOwner) - } - - output, err := 
conn.GetBucketNotificationConfiguration(ctx, input) - - if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) { - return nil, &retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output, nil -} diff --git a/internal/service/s3/bucket_ownership_controls.go b/internal/service/s3/bucket_ownership_controls.go index 2c1d777d54b..5d2c4b32990 100644 --- a/internal/service/s3/bucket_ownership_controls.go +++ b/internal/service/s3/bucket_ownership_controls.go @@ -169,6 +169,31 @@ func resourceBucketOwnershipControlsDelete(ctx context.Context, d *schema.Resour return diags } +func findOwnershipControls(ctx context.Context, conn *s3.Client, bucket string) (*types.OwnershipControls, error) { + input := &s3.GetBucketOwnershipControlsInput{ + Bucket: aws.String(bucket), + } + + output, err := conn.GetBucketOwnershipControls(ctx, input) + + if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeOwnershipControlsNotFoundError) { + return nil, &retry.NotFoundError{ + LastError: err, + LastRequest: input, + } + } + + if err != nil { + return nil, err + } + + if output == nil || output.OwnershipControls == nil { + return nil, tfresource.NewEmptyResultError(input) + } + + return output.OwnershipControls, nil +} + func expandOwnershipControlsRules(tfList []interface{}) []types.OwnershipControlsRule { if len(tfList) == 0 || tfList[0] == nil { return nil @@ -220,28 +245,3 @@ func flattenOwnershipControlsRule(apiObject types.OwnershipControlsRule) map[str return tfMap } - -func findOwnershipControls(ctx context.Context, conn *s3.Client, bucket string) (*types.OwnershipControls, error) { - input := &s3.GetBucketOwnershipControlsInput{ - Bucket: aws.String(bucket), - } - - output, err := conn.GetBucketOwnershipControls(ctx, input) - - if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket, errCodeOwnershipControlsNotFoundError) { - return nil, 
&retry.NotFoundError{ - LastError: err, - LastRequest: input, - } - } - - if err != nil { - return nil, err - } - - if output == nil || output.OwnershipControls == nil { - return nil, tfresource.NewEmptyResultError(input) - } - - return output.OwnershipControls, nil -} From 696a7c694b48bd96160b5e236962b5a5a752d16a Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 21 Dec 2023 17:28:14 -0500 Subject: [PATCH 390/438] Add CHANGELOG entry. --- .changelog/35035.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/35035.txt diff --git a/.changelog/35035.txt b/.changelog/35035.txt new file mode 100644 index 00000000000..9c530501f8c --- /dev/null +++ b/.changelog/35035.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_s3_bucket: Modify resource Read to support third-party S3 API implementations. Because we cannot easily test this functionality, it is best effort and we ask for community help in testing +``` \ No newline at end of file From 4e7e6ff76466ff8679642ec0f51b1b52eba71ad4 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 21 Dec 2023 17:31:51 -0500 Subject: [PATCH 391/438] r/aws_s3_bucket: Check for 'errCodeMethodNotAllowed' on all sub-resource reads. 
--- internal/service/s3/bucket.go | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 8e2c09e7c2c..60eea32b4b2 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -829,7 +829,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf } d.Set("policy", policyToSet) - case tfawserr.ErrCodeEquals(err, errCodeNoSuchBucketPolicy, errCodeNotImplemented, errCodeXNotImplemented): + case tfawserr.ErrCodeEquals(err, errCodeNoSuchBucketPolicy, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): d.Set("policy", nil) default: return diag.Errorf("reading S3 Bucket (%s) policy: %s", d.Id(), err) @@ -853,7 +853,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf if err := d.Set("grant", flattenBucketGrants(bucketACL)); err != nil { return sdkdiag.AppendErrorf(diags, "setting grant: %s", err) } - case tfawserr.ErrCodeEquals(err, errCodeNotImplemented, errCodeXNotImplemented): + case tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): d.Set("grant", nil) default: return diag.Errorf("reading S3 Bucket (%s) ACL: %s", d.Id(), err) @@ -877,7 +877,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf if err := d.Set("cors_rule", flattenBucketCORSRules(corsRules)); err != nil { return sdkdiag.AppendErrorf(diags, "setting cors_rule: %s", err) } - case tfawserr.ErrCodeEquals(err, errCodeNoSuchCORSConfiguration, errCodeNotImplemented, errCodeXNotImplemented): + case tfawserr.ErrCodeEquals(err, errCodeNoSuchCORSConfiguration, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): d.Set("cors_rule", nil) default: return diag.Errorf("reading S3 Bucket (%s) CORS configuration: %s", d.Id(), err) @@ -929,7 +929,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, 
meta interf if err := d.Set("versioning", flattenBucketVersioning(bucketVersioning)); err != nil { return sdkdiag.AppendErrorf(diags, "setting versioning: %s", err) } - case tfawserr.ErrCodeEquals(err, errCodeNotImplemented, errCodeXNotImplemented): + case tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): d.Set("versioning", nil) default: return diag.Errorf("reading S3 Bucket (%s) versioning: %s", d.Id(), err) @@ -951,7 +951,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf switch { case err == nil: d.Set("acceleration_status", bucketAccelerate.Status) - case tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeUnsupportedArgument, errCodeNotImplemented, errCodeXNotImplemented): + case tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented, errCodeUnsupportedArgument): d.Set("acceleration_status", nil) default: return diag.Errorf("reading S3 Bucket (%s) accelerate configuration: %s", d.Id(), err) @@ -973,7 +973,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf switch { case err == nil: d.Set("request_payer", bucketRequestPayment.Payer) - case tfawserr.ErrCodeEquals(err, errCodeNotImplemented, errCodeXNotImplemented): + case tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): d.Set("request_payer", nil) default: return diag.Errorf("reading S3 Bucket (%s) request payment configuration: %s", d.Id(), err) @@ -997,7 +997,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf if err := d.Set("logging", flattenBucketLoggingEnabled(loggingEnabled)); err != nil { return sdkdiag.AppendErrorf(diags, "setting logging: %s", err) } - case tfawserr.ErrCodeEquals(err, errCodeNotImplemented, errCodeXNotImplemented): + case tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): 
d.Set("logging", nil) default: return diag.Errorf("reading S3 Bucket (%s) logging: %s", d.Id(), err) @@ -1021,7 +1021,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf if err := d.Set("lifecycle_rule", flattenBucketLifecycleRules(ctx, lifecycleRules)); err != nil { return sdkdiag.AppendErrorf(diags, "setting lifecycle_rule: %s", err) } - case tfawserr.ErrCodeEquals(err, errCodeNoSuchLifecycleConfiguration, errCodeNotImplemented, errCodeXNotImplemented): + case tfawserr.ErrCodeEquals(err, errCodeNoSuchLifecycleConfiguration, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): d.Set("lifecycle_rule", nil) default: return diag.Errorf("reading S3 Bucket (%s) lifecycle configuration: %s", d.Id(), err) @@ -1045,7 +1045,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf if err := d.Set("replication_configuration", flattenBucketReplicationConfiguration(ctx, replicationConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting replication_configuration: %s", err) } - case tfawserr.ErrCodeEquals(err, errCodeReplicationConfigurationNotFound, errCodeNotImplemented, errCodeXNotImplemented): + case tfawserr.ErrCodeEquals(err, errCodeReplicationConfigurationNotFound, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): d.Set("replication_configuration", nil) default: return diag.Errorf("reading S3 Bucket (%s) replication configuration: %s", d.Id(), err) @@ -1069,7 +1069,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf if err := d.Set("server_side_encryption_configuration", flattenBucketServerSideEncryptionConfiguration(encryptionConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting server_side_encryption_configuration: %s", err) } - case tfawserr.ErrCodeEquals(err, errCodeReplicationConfigurationNotFound, errCodeNotImplemented, errCodeXNotImplemented): + case tfawserr.ErrCodeEquals(err, 
errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): d.Set("server_side_encryption_configuration", nil) default: return diag.Errorf("reading S3 Bucket (%s) server-side encryption configuration: %s", d.Id(), err) @@ -1158,7 +1158,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf switch { case err == nil: setTagsOut(ctx, Tags(tags)) - case tfawserr.ErrCodeEquals(err, errCodeNotImplemented, errCodeXNotImplemented): + case tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): default: return sdkdiag.AppendErrorf(diags, "listing tags for S3 Bucket (%s): %s", d.Id(), err) } From 486d9f13ca9ab7af835955f8de77feed2460918e Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 21 Dec 2023 17:33:22 -0500 Subject: [PATCH 392/438] Fix golangci-lint 'unused'. --- internal/service/s3/bucket.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 60eea32b4b2..9f42085b124 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -1225,7 +1225,7 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta inte input := &s3.PutBucketCorsInput{ Bucket: aws.String(d.Id()), CORSConfiguration: &types.CORSConfiguration{ - CORSRules: expandCORSRules(d.Get("cors_rule").(*schema.Set).List()), + CORSRules: expandBucketCORSRules(d.Get("cors_rule").(*schema.Set).List()), }, } From 59afeaea60f463e4693864b806f35628095bf95a Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 21 Dec 2023 17:42:30 -0500 Subject: [PATCH 393/438] Fix semgrep 'ci.s3-in-func-name'. 
--- internal/service/s3/bucket_analytics_configuration.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/s3/bucket_analytics_configuration.go b/internal/service/s3/bucket_analytics_configuration.go index 1733e349ca9..846db0154fe 100644 --- a/internal/service/s3/bucket_analytics_configuration.go +++ b/internal/service/s3/bucket_analytics_configuration.go @@ -336,7 +336,7 @@ func expandAnalyticsExportDestination(edl []interface{}) *types.AnalyticsExportD return result } -func expandAnalyticsS3BucketDestination(bdl []interface{}) *types.AnalyticsS3BucketDestination { +func expandAnalyticsS3BucketDestination(bdl []interface{}) *types.AnalyticsS3BucketDestination { // nosemgrep:ci.s3-in-func-name result := &types.AnalyticsS3BucketDestination{} if len(bdl) != 0 && bdl[0] != nil { @@ -412,7 +412,7 @@ func flattenAnalyticsExportDestination(destination *types.AnalyticsExportDestina } } -func flattenAnalyticsS3BucketDestination(bucketDestination *types.AnalyticsS3BucketDestination) []interface{} { +func flattenAnalyticsS3BucketDestination(bucketDestination *types.AnalyticsS3BucketDestination) []interface{} { // nosemgrep:ci.s3-in-func-name if bucketDestination == nil { return nil } From a747649bb84886080f01649ff73e9e291319d294 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Thu, 21 Dec 2023 17:49:59 -0500 Subject: [PATCH 394/438] r/aws_s3_bucket: Fix 'panic: interface conversion: interface {} is types.Permission, not string'. 
--- internal/service/s3/bucket.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 9f42085b124..7c73e958f0a 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -2061,7 +2061,7 @@ func flattenBucketGrants(apiObject *s3.GetBucketAclOutput) []interface{} { if v, ok := getGrant(results, m); ok { v.(map[string]interface{})["permissions"].(*schema.Set).Add(apiObject.Permission) } else { - m["permissions"] = schema.NewSet(schema.HashString, []interface{}{apiObject.Permission}) + m["permissions"] = schema.NewSet(schema.HashString, []interface{}{string(apiObject.Permission)}) results = append(results, m) } } From 65a8b0faa6b653f06e00a9e0c7e343aab931e829 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 22 Dec 2023 07:00:33 +0000 Subject: [PATCH 395/438] build(deps): bump github.com/aws/aws-sdk-go in /.ci/providerlint Bumps [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) from 1.49.5 to 1.49.8. - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.49.5...v1.49.8) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .ci/providerlint/go.mod | 2 +- .ci/providerlint/go.sum | 4 +- .../aws/aws-sdk-go/aws/endpoints/defaults.go | 490 ++++++++++++++++++ .ci/providerlint/vendor/modules.txt | 2 +- 4 files changed, 494 insertions(+), 4 deletions(-) diff --git a/.ci/providerlint/go.mod b/.ci/providerlint/go.mod index 8eccb4b7f72..9e9a3c09361 100644 --- a/.ci/providerlint/go.mod +++ b/.ci/providerlint/go.mod @@ -3,7 +3,7 @@ module github.com/hashicorp/terraform-provider-aws/ci/providerlint go 1.20 require ( - github.com/aws/aws-sdk-go v1.49.5 + github.com/aws/aws-sdk-go v1.49.8 github.com/bflad/tfproviderlint v0.29.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.31.0 golang.org/x/tools v0.13.0 diff --git a/.ci/providerlint/go.sum b/.ci/providerlint/go.sum index d259a928f78..223e1242752 100644 --- a/.ci/providerlint/go.sum +++ b/.ci/providerlint/go.sum @@ -7,8 +7,8 @@ github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= -github.com/aws/aws-sdk-go v1.49.5 h1:y2yfBlwjPDi3/sBVKeznYEdDy6wIhjA2L5NCBMLUIYA= -github.com/aws/aws-sdk-go v1.49.5/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.49.8 h1:gKgEiyJ8CPnr4r6pS06WfNXvp6z34JER1pBIwuocvVA= +github.com/aws/aws-sdk-go v1.49.8/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/bflad/gopaniccheck v0.1.0 h1:tJftp+bv42ouERmUMWLoUn/5bi/iQZjHPznM00cP/bU= github.com/bflad/gopaniccheck v0.1.0/go.mod h1:ZCj2vSr7EqVeDaqVsWN4n2MwdROx1YL+LFo47TSWtsA= github.com/bflad/tfproviderlint v0.29.0 h1:zxKYAAM6IZ4ace1a3LX+uzMRIMP8L+iOtEc+FP2Yoow= diff --git a/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go 
b/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 16de4d78eec..cd565b4d3ba 100644 --- a/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/.ci/providerlint/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -31,6 +31,7 @@ const ( ApSoutheast3RegionID = "ap-southeast-3" // Asia Pacific (Jakarta). ApSoutheast4RegionID = "ap-southeast-4" // Asia Pacific (Melbourne). CaCentral1RegionID = "ca-central-1" // Canada (Central). + CaWest1RegionID = "ca-west-1" // Canada West (Calgary). EuCentral1RegionID = "eu-central-1" // Europe (Frankfurt). EuCentral2RegionID = "eu-central-2" // Europe (Zurich). EuNorth1RegionID = "eu-north-1" // Europe (Stockholm). @@ -190,6 +191,9 @@ var awsPartition = partition{ "ca-central-1": region{ Description: "Canada (Central)", }, + "ca-west-1": region{ + Description: "Canada West (Calgary)", + }, "eu-central-1": region{ Description: "Europe (Frankfurt)", }, @@ -291,6 +295,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "access-analyzer-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -477,6 +484,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "acm-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "acm-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -1269,6 +1294,14 @@ var awsPartition = partition{ Region: "ca-central-1", }, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "api.ecr.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + }, endpointKey{ Region: 
"dkr-us-east-1", }: endpoint{ @@ -2251,6 +2284,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "apigateway-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "apigateway-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -2284,6 +2326,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "apigateway-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -2442,6 +2493,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -2530,6 +2584,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -2735,6 +2792,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -4005,6 +4065,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "autoscaling-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "autoscaling-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -4038,6 +4107,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "autoscaling-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", 
+ }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -4485,6 +4563,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -5095,6 +5176,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "cloudcontrolapi-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -5128,6 +5218,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "cloudcontrolapi-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -5283,6 +5382,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -5576,6 +5678,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -6159,6 +6264,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -7345,6 +7453,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -8316,6 +8427,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "datasync-fips.ca-central-1.amazonaws.com", }, + 
endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "datasync-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -8349,6 +8469,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "datasync-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -8514,6 +8643,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "datazone-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "datazone.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -8834,6 +8968,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -9007,6 +9144,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -9095,6 +9235,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "dms", }: endpoint{ @@ -9480,6 +9623,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "ds-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -9657,6 +9803,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "dynamodb-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: 
"ca-west-1-fips", + }: endpoint{ + Hostname: "dynamodb-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -9820,6 +9984,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "ebs-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ebs-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -9853,6 +10026,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ebs-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -10181,6 +10363,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -10362,6 +10547,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -10545,6 +10733,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "eks-auth.ca-central-1.api.aws", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "eks-auth.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -10665,6 +10858,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -11468,6 +11664,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ 
Region: "eu-central-1", }: endpoint{}, @@ -11628,6 +11827,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "elasticmapreduce-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -11663,6 +11871,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "elasticmapreduce-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -12351,6 +12568,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "aos.ca-central-1.api.aws", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "aos.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -12604,6 +12830,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -12854,6 +13083,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -14845,6 +15077,9 @@ var awsPartition = partition{ endpointKey{ Region: "ap-southeast-3", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -15329,6 +15564,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "internetmonitor-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "internetmonitor.ca-west-1.api.aws", + }, 
endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -16877,6 +17117,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "kendra-ranking-fips.ca-central-1.api.aws", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "kendra-ranking.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-2", }: endpoint{ @@ -17005,6 +17250,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -17482,6 +17730,24 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "kms-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + Hostname: "kms-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -18030,6 +18296,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "lambda.ca-central-1.api.aws", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "lambda.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -18422,6 +18697,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -18764,6 +19042,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -19284,12 +19565,18 @@ var awsPartition = partition{ endpointKey{ Region: "ap-south-1", }: endpoint{}, + endpointKey{ + Region: "ap-south-2", + }: endpoint{}, 
endpointKey{ Region: "ap-southeast-1", }: endpoint{}, endpointKey{ Region: "ap-southeast-2", }: endpoint{}, + endpointKey{ + Region: "ap-southeast-4", + }: endpoint{}, endpointKey{ Region: "ca-central-1", }: endpoint{}, @@ -19308,6 +19595,9 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "me-central-1", + }: endpoint{}, endpointKey{ Region: "sa-east-1", }: endpoint{}, @@ -20039,6 +20329,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -20472,6 +20765,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -21161,6 +21457,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -21948,6 +22247,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -22753,6 +23055,11 @@ var awsPartition = partition{ }: endpoint{ Hostname: "qbusiness.ca-central-1.api.aws", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{ + Hostname: "qbusiness.ca-west-1.api.aws", + }, endpointKey{ Region: "eu-central-1", }: endpoint{ @@ -23025,6 +23332,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "ram-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ram-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -23058,6 +23374,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + 
endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ram-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -23188,6 +23513,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "rbin-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "rbin-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -23221,6 +23555,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "rbin-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -23758,6 +24101,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "redshift-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "redshift-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -23791,6 +24143,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "redshift-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -24434,6 +24795,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -24793,6 +25157,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: 
"ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -25033,6 +25400,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -25310,6 +25680,27 @@ var awsPartition = partition{ }: endpoint{ Hostname: "s3-fips.dualstack.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{ + Hostname: "s3.dualstack.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "s3-fips.ca-west-1.amazonaws.com", + }, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{ + Hostname: "s3-fips.dualstack.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -25395,6 +25786,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "s3-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -26639,6 +27039,27 @@ var awsPartition = partition{ Deprecated: boxedTrue, }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant | dualStackVariant, + }: endpoint{}, + endpointKey{ + Region: "ca-west-1-fips", + }: endpoint{ + + Deprecated: boxedTrue, + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -26847,6 +27268,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, 
endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -27303,6 +27727,9 @@ var awsPartition = partition{ }: endpoint{ Hostname: "servicecatalog-appregistry-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -27826,6 +28253,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -28794,6 +29224,15 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "sns-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -28818,6 +29257,15 @@ var awsPartition = partition{ endpointKey{ Region: "eu-west-3", }: endpoint{}, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "sns-fips.ca-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -28948,6 +29396,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -29105,6 +29556,15 @@ var awsPartition = partition{ }: endpoint{ Hostname: "ssm-fips.ca-central-1.amazonaws.com", }, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + Variant: fipsVariant, + }: endpoint{ + Hostname: "ssm-fips.ca-west-1.amazonaws.com", + }, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -29138,6 +29598,15 @@ var awsPartition = partition{ }, Deprecated: boxedTrue, }, + endpointKey{ + Region: "fips-ca-west-1", + }: endpoint{ + Hostname: "ssm-fips.ca-west-1.amazonaws.com", + CredentialScope: 
credentialScope{ + Region: "ca-west-1", + }, + Deprecated: boxedTrue, + }, endpointKey{ Region: "fips-us-east-1", }: endpoint{ @@ -29722,6 +30191,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -30041,6 +30513,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -30147,6 +30622,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -30321,6 +30799,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -30469,6 +30950,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -30617,6 +31101,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, @@ -33543,6 +34030,9 @@ var awsPartition = partition{ endpointKey{ Region: "ca-central-1", }: endpoint{}, + endpointKey{ + Region: "ca-west-1", + }: endpoint{}, endpointKey{ Region: "eu-central-1", }: endpoint{}, diff --git a/.ci/providerlint/vendor/modules.txt b/.ci/providerlint/vendor/modules.txt index 0af3b9322a2..fd8f72ff085 100644 --- a/.ci/providerlint/vendor/modules.txt +++ b/.ci/providerlint/vendor/modules.txt @@ -24,7 +24,7 @@ github.com/agext/levenshtein # github.com/apparentlymart/go-textseg/v15 v15.0.0 ## explicit; go 1.16 github.com/apparentlymart/go-textseg/v15/textseg -# 
github.com/aws/aws-sdk-go v1.49.5 +# github.com/aws/aws-sdk-go v1.49.8 ## explicit; go 1.19 github.com/aws/aws-sdk-go/aws/awserr github.com/aws/aws-sdk-go/aws/endpoints From 1b703bf677c7e6090bfff0165258544567bcbd80 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 08:17:17 -0500 Subject: [PATCH 396/438] r/aws_s3_bucket: Check for 'tfresource.NotFound()' on all sub-resource reads. --- internal/service/s3/bucket.go | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 7c73e958f0a..af8ebdf684f 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -829,7 +829,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf } d.Set("policy", policyToSet) - case tfawserr.ErrCodeEquals(err, errCodeNoSuchBucketPolicy, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): + case tfresource.NotFound(err), tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): d.Set("policy", nil) default: return diag.Errorf("reading S3 Bucket (%s) policy: %s", d.Id(), err) @@ -853,7 +853,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf if err := d.Set("grant", flattenBucketGrants(bucketACL)); err != nil { return sdkdiag.AppendErrorf(diags, "setting grant: %s", err) } - case tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): + case tfresource.NotFound(err), tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): d.Set("grant", nil) default: return diag.Errorf("reading S3 Bucket (%s) ACL: %s", d.Id(), err) @@ -877,7 +877,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf if err := d.Set("cors_rule", flattenBucketCORSRules(corsRules)); err != nil { return sdkdiag.AppendErrorf(diags, "setting 
cors_rule: %s", err) } - case tfawserr.ErrCodeEquals(err, errCodeNoSuchCORSConfiguration, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): + case tfresource.NotFound(err), tfawserr.ErrCodeEquals(err, errCodeNoSuchCORSConfiguration, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): d.Set("cors_rule", nil) default: return diag.Errorf("reading S3 Bucket (%s) CORS configuration: %s", d.Id(), err) @@ -905,7 +905,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf if err := d.Set("website", website); err != nil { return sdkdiag.AppendErrorf(diags, "setting website: %s", err) } - case tfawserr.ErrCodeEquals(err, errCodeNoSuchWebsiteConfiguration, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): + case tfresource.NotFound(err), tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): d.Set("website", nil) default: return diag.Errorf("reading S3 Bucket (%s) website configuration: %s", d.Id(), err) @@ -929,7 +929,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf if err := d.Set("versioning", flattenBucketVersioning(bucketVersioning)); err != nil { return sdkdiag.AppendErrorf(diags, "setting versioning: %s", err) } - case tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): + case tfresource.NotFound(err), tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): d.Set("versioning", nil) default: return diag.Errorf("reading S3 Bucket (%s) versioning: %s", d.Id(), err) @@ -951,7 +951,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf switch { case err == nil: d.Set("acceleration_status", bucketAccelerate.Status) - case tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented, errCodeUnsupportedArgument): + case 
tfresource.NotFound(err), tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented, errCodeUnsupportedArgument): d.Set("acceleration_status", nil) default: return diag.Errorf("reading S3 Bucket (%s) accelerate configuration: %s", d.Id(), err) @@ -973,7 +973,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf switch { case err == nil: d.Set("request_payer", bucketRequestPayment.Payer) - case tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): + case tfresource.NotFound(err), tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): d.Set("request_payer", nil) default: return diag.Errorf("reading S3 Bucket (%s) request payment configuration: %s", d.Id(), err) @@ -997,7 +997,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf if err := d.Set("logging", flattenBucketLoggingEnabled(loggingEnabled)); err != nil { return sdkdiag.AppendErrorf(diags, "setting logging: %s", err) } - case tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): + case tfresource.NotFound(err), tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): d.Set("logging", nil) default: return diag.Errorf("reading S3 Bucket (%s) logging: %s", d.Id(), err) @@ -1021,7 +1021,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf if err := d.Set("lifecycle_rule", flattenBucketLifecycleRules(ctx, lifecycleRules)); err != nil { return sdkdiag.AppendErrorf(diags, "setting lifecycle_rule: %s", err) } - case tfawserr.ErrCodeEquals(err, errCodeNoSuchLifecycleConfiguration, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): + case tfresource.NotFound(err), tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): 
d.Set("lifecycle_rule", nil) default: return diag.Errorf("reading S3 Bucket (%s) lifecycle configuration: %s", d.Id(), err) @@ -1045,7 +1045,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf if err := d.Set("replication_configuration", flattenBucketReplicationConfiguration(ctx, replicationConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting replication_configuration: %s", err) } - case tfawserr.ErrCodeEquals(err, errCodeReplicationConfigurationNotFound, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): + case tfresource.NotFound(err), tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): d.Set("replication_configuration", nil) default: return diag.Errorf("reading S3 Bucket (%s) replication configuration: %s", d.Id(), err) @@ -1069,7 +1069,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf if err := d.Set("server_side_encryption_configuration", flattenBucketServerSideEncryptionConfiguration(encryptionConfiguration)); err != nil { return sdkdiag.AppendErrorf(diags, "setting server_side_encryption_configuration: %s", err) } - case tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): + case tfresource.NotFound(err), tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): d.Set("server_side_encryption_configuration", nil) default: return diag.Errorf("reading S3 Bucket (%s) server-side encryption configuration: %s", d.Id(), err) @@ -1094,7 +1094,7 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf return sdkdiag.AppendErrorf(diags, "setting object_lock_configuration: %s", err) } d.Set("object_lock_enabled", objLockConfig.ObjectLockEnabled == types.ObjectLockEnabledEnabled) - case tfawserr.ErrCodeEquals(err, errCodeObjectLockConfigurationNotFoundError, errCodeMethodNotAllowed, 
errCodeNotImplemented, errCodeXNotImplemented): + case tfresource.NotFound(err), tfawserr.ErrCodeEquals(err, errCodeMethodNotAllowed, errCodeNotImplemented, errCodeXNotImplemented): d.Set("object_lock_configuration", nil) d.Set("object_lock_enabled", nil) default: From 81dd075c89d02b4d909bf476c86b6cbd11dd4358 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 08:18:56 -0500 Subject: [PATCH 397/438] Fix providerlint 'AWSR001'. --- internal/service/s3/bucket_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index a784acd3d79..1c720ac46ef 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -2415,15 +2415,15 @@ func TestWebsiteEndpoint(t *testing.T) { }{ { LocationConstraint: "", - Expected: fmt.Sprintf("bucket-name.s3-website-%s.amazonaws.com", names.USEast1RegionID), + Expected: fmt.Sprintf("bucket-name.s3-website-%s.amazonaws.com", names.USEast1RegionID), //lintignore:AWSR001 }, { LocationConstraint: names.USEast2RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.amazonaws.com", names.USEast2RegionID), + Expected: fmt.Sprintf("bucket-name.s3-website.%s.amazonaws.com", names.USEast2RegionID), //lintignore:AWSR001 }, { LocationConstraint: names.USGovEast1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.amazonaws.com", names.USGovEast1RegionID), + Expected: fmt.Sprintf("bucket-name.s3-website.%s.amazonaws.com", names.USGovEast1RegionID), //lintignore:AWSR001 }, { LocationConstraint: names.USISOEast1RegionID, From a74eb06c3ebf21c143de025d600559fe24192e1c Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 09:13:21 -0500 Subject: [PATCH 398/438] Add CHANGELOG entry. 
--- .changelog/#####.txt | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .changelog/#####.txt diff --git a/.changelog/#####.txt b/.changelog/#####.txt new file mode 100644 index 00000000000..cb03be81450 --- /dev/null +++ b/.changelog/#####.txt @@ -0,0 +1,7 @@ +```release-note:enhancement +resource/aws_lambda_function: Add support for `python3.12` `runtime` value +``` + +```release-note:enhancement +resource/aws_lambda_layer_version: Add support for `python3.12` `compatible_runtimes` value +``` \ No newline at end of file From f8fc4c543a14cf868405785f832c24a0ab5bbe74 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 09:15:00 -0500 Subject: [PATCH 399/438] Correct CHANGELOG entry file name. --- .changelog/{#####.txt => 35049.txt} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .changelog/{#####.txt => 35049.txt} (100%) diff --git a/.changelog/#####.txt b/.changelog/35049.txt similarity index 100% rename from .changelog/#####.txt rename to .changelog/35049.txt From 1e5f3553a0e8b80ef9bd001bbf4de82af00a9f7f Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 09:26:43 -0500 Subject: [PATCH 400/438] More test checks in 'TestAccS3Bucket_Basic_basic'. 
--- internal/service/s3/bucket_test.go | 45 +++++++++++++++++++++++------- 1 file changed, 35 insertions(+), 10 deletions(-) diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index bddb085ee2c..69aaf30874f 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -48,7 +48,7 @@ func testAccErrorCheckSkip(t *testing.T) resource.ErrorCheckFunc { func TestAccS3Bucket_Basic_basic(t *testing.T) { ctx := acctest.Context(t) - bucketName := sdkacctest.RandomWithPrefix("tf-test-bucket") + rName := sdkacctest.RandomWithPrefix("tf-test-bucket") region := acctest.Region() hostedZoneID, _ := tfs3.HostedZoneIDForRegion(region) resourceName := "aws_s3_bucket.test" @@ -60,21 +60,46 @@ func TestAccS3Bucket_Basic_basic(t *testing.T) { CheckDestroy: testAccCheckBucketDestroy(ctx), Steps: []resource.TestStep{ { - Config: testAccBucketConfig_basic(bucketName), - Check: resource.ComposeTestCheckFunc( + Config: testAccBucketConfig_basic(rName), + Check: resource.ComposeAggregateTestCheckFunc( testAccCheckBucketExists(ctx, resourceName), + resource.TestCheckResourceAttr(resourceName, "acceleration_status", ""), + resource.TestCheckNoResourceAttr(resourceName, "acl"), + acctest.CheckResourceAttrGlobalARNNoAccount(resourceName, "arn", "s3", rName), + resource.TestCheckResourceAttr(resourceName, "bucket", rName), + testAccCheckBucketDomainName(resourceName, "bucket_domain_name", rName), + resource.TestCheckResourceAttr(resourceName, "bucket_prefix", ""), + resource.TestCheckResourceAttr(resourceName, "bucket_regional_domain_name", testAccBucketRegionalDomainName(rName, region)), + resource.TestCheckResourceAttr(resourceName, "cors_rule.#", "0"), + resource.TestCheckResourceAttr(resourceName, "force_destroy", "false"), + resource.TestCheckResourceAttr(resourceName, "grant.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resourceName, "grant.*", map[string]string{ + "permissions.#": "1", + "type": "CanonicalUser", + 
"uri": "", + }), resource.TestCheckResourceAttr(resourceName, "hosted_zone_id", hostedZoneID), + resource.TestCheckResourceAttr(resourceName, "lifecycle_rule.#", "0"), + resource.TestCheckResourceAttr(resourceName, "logging.#", "0"), + resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "policy", ""), resource.TestCheckResourceAttr(resourceName, "region", region), - resource.TestCheckNoResourceAttr(resourceName, "website_endpoint"), - acctest.CheckResourceAttrGlobalARNNoAccount(resourceName, "arn", "s3", bucketName), - resource.TestCheckResourceAttr(resourceName, "bucket", bucketName), - testAccCheckBucketDomainName(resourceName, "bucket_domain_name", bucketName), - resource.TestCheckResourceAttr(resourceName, "bucket_regional_domain_name", testAccBucketRegionalDomainName(bucketName, region)), + resource.TestCheckResourceAttr(resourceName, "replication_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "request_payer", "BucketOwner"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.#", "1"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.kms_master_key_id", ""), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.apply_server_side_encryption_by_default.0.sse_algorithm", "AES256"), + resource.TestCheckResourceAttr(resourceName, "server_side_encryption_configuration.0.rule.0.bucket_key_enabled", "false"), + resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), 
resource.TestCheckResourceAttr(resourceName, "versioning.#", "1"), resource.TestCheckResourceAttr(resourceName, "versioning.0.enabled", "false"), resource.TestCheckResourceAttr(resourceName, "versioning.0.mfa_delete", "false"), - resource.TestCheckResourceAttr(resourceName, "object_lock_enabled", "false"), - resource.TestCheckResourceAttr(resourceName, "object_lock_configuration.#", "0"), + resource.TestCheckResourceAttr(resourceName, "website.#", "0"), + resource.TestCheckNoResourceAttr(resourceName, "website_domain"), + resource.TestCheckNoResourceAttr(resourceName, "website_endpoint"), ), }, { From 1cb592b3d92be494f7aae1d16b4dbada35ee23b9 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 10:42:48 -0500 Subject: [PATCH 401/438] r/aws_s3_bucket: Extra eventual consistency retries during CheckDestroy. --- internal/service/s3/bucket_test.go | 13 +++++++++---- internal/service/s3/exports_test.go | 1 + 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index 69aaf30874f..5b7a5e0433d 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -5,6 +5,7 @@ package s3_test import ( "context" + "errors" "fmt" "log" "reflect" @@ -2612,17 +2613,21 @@ func testAccCheckBucketDestroyWithProvider(ctx context.Context) acctest.TestChec continue } - err := tfs3.FindBucket(ctx, conn, rs.Primary.ID) + // S3 seems to be highly eventually consistent. Even if one connection reports that the queue is gone, + // another connection may still report it as present. 
+ _, err := tfresource.RetryUntilNotFound(ctx, tfs3.S3BucketPropagationTimeout, func() (interface{}, error) { + return nil, tfs3.FindBucket(ctx, conn, rs.Primary.ID) + }) - if tfresource.NotFound(err) { - continue + if errors.Is(err, tfresource.ErrFoundResource) { + return fmt.Errorf("S3 Bucket %s still exists", rs.Primary.ID) } if err != nil { return err } - return fmt.Errorf("S3 Bucket %s still exists", rs.Primary.ID) + continue } return nil diff --git a/internal/service/s3/exports_test.go b/internal/service/s3/exports_test.go index 952663c63fb..d3e5f3fe48c 100644 --- a/internal/service/s3/exports_test.go +++ b/internal/service/s3/exports_test.go @@ -36,4 +36,5 @@ var ( ErrCodeNoSuchCORSConfiguration = errCodeNoSuchCORSConfiguration LifecycleRuleStatusDisabled = lifecycleRuleStatusDisabled LifecycleRuleStatusEnabled = lifecycleRuleStatusEnabled + S3BucketPropagationTimeout = s3BucketPropagationTimeout ) From 11b230b434dbed137d9b388dc4dd584911cf0a7e Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 10:43:11 -0500 Subject: [PATCH 402/438] Acceptance test output: % make testacc TESTARGS='-run=TestAccS3Bucket_Basic_basic' PKG=s3 ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/s3/... -v -count 1 -parallel 20 -run=TestAccS3Bucket_Basic_basic -timeout 360m === RUN TestAccS3Bucket_Basic_basic === PAUSE TestAccS3Bucket_Basic_basic === CONT TestAccS3Bucket_Basic_basic --- PASS: TestAccS3Bucket_Basic_basic (23.85s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/s3 29.112s From c51d42f8fa28456ba9532361b20c329f38bbce43 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 11:05:44 -0500 Subject: [PATCH 403/438] d/aws_mq_engine_versions: 'filter' -> 'engine_type'. 
--- .../service/mq/engine_versions_data_source.go | 58 +++++++------------ .../mq/engine_versions_data_source_test.go | 5 +- .../docs/d/mq_engine_versions.html.markdown | 12 +--- 3 files changed, 25 insertions(+), 50 deletions(-) diff --git a/internal/service/mq/engine_versions_data_source.go b/internal/service/mq/engine_versions_data_source.go index 3bcc4b54b76..c2e52e95a70 100644 --- a/internal/service/mq/engine_versions_data_source.go +++ b/internal/service/mq/engine_versions_data_source.go @@ -12,10 +12,9 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/diag" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" "github.com/hashicorp/terraform-provider-aws/internal/conns" - "github.com/hashicorp/terraform-provider-aws/internal/create" - "github.com/hashicorp/terraform-provider-aws/names" + "github.com/hashicorp/terraform-provider-aws/internal/enum" + "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" ) // Function annotations are used for datasource registration to the Provider. DO NOT EDIT. 
@@ -23,21 +22,8 @@ import ( func DataSourceEngineVersions() *schema.Resource { return &schema.Resource{ ReadWithoutTimeout: dataSourceEngineVersionsRead, + Schema: map[string]*schema.Schema{ - "filters": { - Type: schema.TypeSet, - Optional: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "engine_type": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringInSlice([]string{"ACTIVEMQ", "RABBITMQ"}, false), - }, - }, - }, - }, "broker_engine_types": { Type: schema.TypeList, Computed: true, @@ -62,46 +48,46 @@ func DataSourceEngineVersions() *schema.Resource { }, }, }, + "engine_type": { + Type: schema.TypeString, + Optional: true, + ValidateDiagFunc: enum.Validate[types.EngineType](), + }, }, } } -const ( - DSNameEngineVersions = "Engine Versions Data Source" -) - func dataSourceEngineVersionsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics client := meta.(*conns.AWSClient).MQClient(ctx) input := &mq.DescribeBrokerEngineTypesInput{} - if v, ok := d.GetOk("filters"); ok { - filters := v.(*schema.Set).List() - for _, filter := range filters { - f := filter.(map[string]interface{}) - if v, ok := f["engine_type"]; ok { - input.EngineType = aws.String(v.(string)) - } - } + + if v, ok := d.GetOk("engine_type"); ok { + input.EngineType = aws.String(v.(string)) } - d.SetId(id.UniqueId()) var engineTypes []types.BrokerEngineType for { - out, err := client.DescribeBrokerEngineTypes(ctx, input) + output, err := client.DescribeBrokerEngineTypes(ctx, input) + if err != nil { - return append(diags, create.DiagError(names.MQ, create.ErrActionReading, DSNameEngineVersions, "", err)...) + return sdkdiag.AppendErrorf(diags, "reading MQ Broker Engine Types: %s", err) } - engineTypes = append(engineTypes, out.BrokerEngineTypes...) - if out.NextToken == nil { + engineTypes = append(engineTypes, output.BrokerEngineTypes...) 
+ + if output.NextToken == nil { break } - input.NextToken = out.NextToken + + input.NextToken = output.NextToken } + d.SetId(id.UniqueId()) + if err := d.Set("broker_engine_types", flattenBrokerList(engineTypes)); err != nil { - return append(diags, create.DiagError(names.MQ, create.ErrActionSetting, DSNameEngineVersions, d.Id(), err)...) + return sdkdiag.AppendErrorf(diags, "setting broker_engine_types: %s", err) } return diags diff --git a/internal/service/mq/engine_versions_data_source_test.go b/internal/service/mq/engine_versions_data_source_test.go index 73bdcd62a93..00d6f26c27c 100644 --- a/internal/service/mq/engine_versions_data_source_test.go +++ b/internal/service/mq/engine_versions_data_source_test.go @@ -24,7 +24,6 @@ func TestAccMQEngineVersionsDataSource_basic(t *testing.T) { }, ErrorCheck: acctest.ErrorCheck(t, mq.EndpointsID), ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: acctest.CheckDestroyNoop, Steps: []resource.TestStep{ { Config: testAccEngineVersionsDataSourceConfig_basic("ACTIVEMQ"), @@ -40,9 +39,7 @@ func TestAccMQEngineVersionsDataSource_basic(t *testing.T) { func testAccEngineVersionsDataSourceConfig_basic(engineType string) string { return fmt.Sprintf(` data "aws_mq_engine_versions" "test" { - filters { - engine_type = %[1]q - } + engine_type = %[1]q } `, engineType) } diff --git a/website/docs/d/mq_engine_versions.html.markdown b/website/docs/d/mq_engine_versions.html.markdown index fbc9c4c1d22..82dc9509e38 100644 --- a/website/docs/d/mq_engine_versions.html.markdown +++ b/website/docs/d/mq_engine_versions.html.markdown @@ -16,21 +16,13 @@ Terraform data source for managing an AWS MQ Engine Versions. ```terraform data "aws_mq_engine_versions" "example" { - filters { - engine_type = "ACTIVEMQ" - } + engine_type = "ACTIVEMQ" } ``` ## Argument Reference -* `filters` - Filters the results of the request. See [Filters](#filters). - -### filters - -The following filters are optional. 
- -* `engine_type` - (Optional) The database engine to return version details for. +* `engine_type` - (Optional) The MQ engine type to return version details for. ## Attribute Reference From 0ad3cd4939b98d79cead7b9707159275de0cf635 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 11:11:16 -0500 Subject: [PATCH 404/438] 'aws_mq_engine_versions' -> 'aws_mq_broker_engine_types'. --- .changelog/34232.txt | 2 +- ...source.go => broker_engine_types_data_source.go} | 13 ++++++------- ...t.go => broker_engine_types_data_source_test.go} | 10 +++++----- internal/service/mq/service_package_gen.go | 10 +++++----- ...l.markdown => broker_engine_types.html.markdown} | 10 +++++----- 5 files changed, 22 insertions(+), 23 deletions(-) rename internal/service/mq/{engine_versions_data_source.go => broker_engine_types_data_source.go} (85%) rename internal/service/mq/{engine_versions_data_source_test.go => broker_engine_types_data_source_test.go} (75%) rename website/docs/d/{mq_engine_versions.html.markdown => broker_engine_types.html.markdown} (69%) diff --git a/.changelog/34232.txt b/.changelog/34232.txt index 0f3e7ff3eb8..eaddc04a996 100644 --- a/.changelog/34232.txt +++ b/.changelog/34232.txt @@ -1,3 +1,3 @@ ```release-note:new-data-source -aws_mq_engine_versions +aws_mq_broker_engine_types ``` diff --git a/internal/service/mq/engine_versions_data_source.go b/internal/service/mq/broker_engine_types_data_source.go similarity index 85% rename from internal/service/mq/engine_versions_data_source.go rename to internal/service/mq/broker_engine_types_data_source.go index c2e52e95a70..912dafed251 100644 --- a/internal/service/mq/engine_versions_data_source.go +++ b/internal/service/mq/broker_engine_types_data_source.go @@ -17,11 +17,10 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/errs/sdkdiag" ) -// Function annotations are used for datasource registration to the Provider. DO NOT EDIT. 
-// @SDKDataSource("aws_mq_engine_versions", name="Engine Versions") -func DataSourceEngineVersions() *schema.Resource { +// @SDKDataSource("aws_mq_broker_engine_types", name="Broker Engine Types") +func DataSourceBrokerEngineTypes() *schema.Resource { return &schema.Resource{ - ReadWithoutTimeout: dataSourceEngineVersionsRead, + ReadWithoutTimeout: dataSourceBrokerEngineTypesRead, Schema: map[string]*schema.Schema{ "broker_engine_types": { @@ -49,15 +48,15 @@ func DataSourceEngineVersions() *schema.Resource { }, }, "engine_type": { - Type: schema.TypeString, - Optional: true, + Type: schema.TypeString, + Optional: true, ValidateDiagFunc: enum.Validate[types.EngineType](), }, }, } } -func dataSourceEngineVersionsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { +func dataSourceBrokerEngineTypesRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { var diags diag.Diagnostics client := meta.(*conns.AWSClient).MQClient(ctx) diff --git a/internal/service/mq/engine_versions_data_source_test.go b/internal/service/mq/broker_engine_types_data_source_test.go similarity index 75% rename from internal/service/mq/engine_versions_data_source_test.go rename to internal/service/mq/broker_engine_types_data_source_test.go index 00d6f26c27c..e703975aac1 100644 --- a/internal/service/mq/engine_versions_data_source_test.go +++ b/internal/service/mq/broker_engine_types_data_source_test.go @@ -12,9 +12,9 @@ import ( "github.com/hashicorp/terraform-provider-aws/internal/acctest" ) -func TestAccMQEngineVersionsDataSource_basic(t *testing.T) { +func TestAccMQBrokerEngineTypesDataSource_basic(t *testing.T) { ctx := acctest.Context(t) - dataSourceName := "data.aws_mq_engine_versions.test" + dataSourceName := "data.aws_mq_broker_engine_types.test" resource.ParallelTest(t, resource.TestCase{ PreCheck: func() { @@ -26,7 +26,7 @@ func TestAccMQEngineVersionsDataSource_basic(t *testing.T) { ProtoV5ProviderFactories: 
acctest.ProtoV5ProviderFactories, Steps: []resource.TestStep{ { - Config: testAccEngineVersionsDataSourceConfig_basic("ACTIVEMQ"), + Config: testAccBrokerEngineTypesDataSourceConfig_basic("ACTIVEMQ"), Check: resource.ComposeTestCheckFunc( resource.TestCheckResourceAttr(dataSourceName, "broker_engine_types.#", "1"), resource.TestCheckResourceAttr(dataSourceName, "broker_engine_types.0.engine_type", "ACTIVEMQ"), @@ -36,9 +36,9 @@ func TestAccMQEngineVersionsDataSource_basic(t *testing.T) { }) } -func testAccEngineVersionsDataSourceConfig_basic(engineType string) string { +func testAccBrokerEngineTypesDataSourceConfig_basic(engineType string) string { return fmt.Sprintf(` -data "aws_mq_engine_versions" "test" { +data "aws_mq_broker_engine_types" "test" { engine_type = %[1]q } `, engineType) diff --git a/internal/service/mq/service_package_gen.go b/internal/service/mq/service_package_gen.go index d6ff59beda0..aa815cc431d 100644 --- a/internal/service/mq/service_package_gen.go +++ b/internal/service/mq/service_package_gen.go @@ -32,13 +32,13 @@ func (p *servicePackage) SDKDataSources(ctx context.Context) []*types.ServicePac TypeName: "aws_mq_broker", }, { - Factory: DataSourceBrokerInstanceTypeOfferings, - TypeName: "aws_mq_broker_instance_type_offerings", + Factory: DataSourceBrokerEngineTypes, + TypeName: "aws_mq_broker_engine_types", + Name: "Broker Engine Types", }, { - Factory: DataSourceEngineVersions, - TypeName: "aws_mq_engine_versions", - Name: "Engine Versions", + Factory: DataSourceBrokerInstanceTypeOfferings, + TypeName: "aws_mq_broker_instance_type_offerings", }, } } diff --git a/website/docs/d/mq_engine_versions.html.markdown b/website/docs/d/broker_engine_types.html.markdown similarity index 69% rename from website/docs/d/mq_engine_versions.html.markdown rename to website/docs/d/broker_engine_types.html.markdown index 82dc9509e38..0a711a21659 100644 --- a/website/docs/d/mq_engine_versions.html.markdown +++ 
b/website/docs/d/broker_engine_types.html.markdown @@ -1,21 +1,21 @@ --- subcategory: "MQ" layout: "aws" -page_title: "AWS: aws_mq_engine_versions" +page_title: "AWS: aws_mq_broker_engine_types" description: |- - Terraform data source for managing an AWS MQ Engine Versions. + Retrieve information about available broker engines. --- -# Data Source: aws_mq_engine_versions +# Data Source: aws_mq_broker_engine_types -Terraform data source for managing an AWS MQ Engine Versions. +Retrieve information about available broker engines. ## Example Usage ### Basic Usage ```terraform -data "aws_mq_engine_versions" "example" { +data "aws_mq_broker_engine_types" "example" { engine_type = "ACTIVEMQ" } ``` From 3321c5783198ed5b5b85382437b0c872b165e845 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 11:23:41 -0500 Subject: [PATCH 405/438] r/aws_s3_bucket: Backwards compatibility for setting 'website_domain' and 'website_endpoint'. --- internal/service/s3/bucket.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index af8ebdf684f..a6192b45725 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -1137,9 +1137,6 @@ func resourceBucketRead(ctx context.Context, d *schema.ResourceData, meta interf endpoint, domain := bucketWebsiteEndpointAndDomain(d.Id(), region) d.Set("website_domain", domain) d.Set("website_endpoint", endpoint) - } else { - d.Set("website_domain", nil) - d.Set("website_endpoint", nil) } // From b0f3b7144c99278c71fd95f0e793a236e0f453ef Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 11:25:55 -0500 Subject: [PATCH 406/438] Correct documentation file name. 
--- ...e_types.html.markdown => mq_broker_engine_types.html.markdown} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename website/docs/d/{broker_engine_types.html.markdown => mq_broker_engine_types.html.markdown} (100%) diff --git a/website/docs/d/broker_engine_types.html.markdown b/website/docs/d/mq_broker_engine_types.html.markdown similarity index 100% rename from website/docs/d/broker_engine_types.html.markdown rename to website/docs/d/mq_broker_engine_types.html.markdown From 3791f42b20dc24537ece69d60cb3b6e4b438a7a3 Mon Sep 17 00:00:00 2001 From: JamesJJ Date: Sat, 23 Dec 2023 00:40:12 +0800 Subject: [PATCH 407/438] Add support for logging_config in `aws_lambda_function` resource Signed-off-by: JamesJJ --- internal/service/lambda/function.go | 79 +++++++++++++++++ .../service/lambda/function_data_source.go | 27 ++++++ .../lambda/function_data_source_test.go | 79 +++++++++++++++++ internal/service/lambda/function_test.go | 88 +++++++++++++++++++ internal/service/lambda/validate.go | 9 ++ 5 files changed, 282 insertions(+) diff --git a/internal/service/lambda/function.go b/internal/service/lambda/function.go index d196ea4deeb..e5965c5edc8 100644 --- a/internal/service/lambda/function.go +++ b/internal/service/lambda/function.go @@ -232,6 +232,39 @@ func ResourceFunction() *schema.Resource { ValidateFunc: verify.ValidARN, }, }, + "logging_config": { + Type: schema.TypeList, + Optional: true, + Computed: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "application_log_level": { + Type: schema.TypeString, + Optional: true, + Default: "", + ValidateDiagFunc: enum.Validate[types.ApplicationLogLevel](), + }, + "log_format": { + Type: schema.TypeString, + Required: true, + ValidateDiagFunc: enum.Validate[types.LogFormat](), + }, + "log_group": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validLogGroupName(), + }, + "system_log_level": { + Type: schema.TypeString, + Optional: true, + 
Default: "", + ValidateDiagFunc: enum.Validate[types.SystemLogLevel](), + }, + }, + }, + }, "memory_size": { Type: schema.TypeInt, Optional: true, @@ -518,6 +551,10 @@ func resourceFunctionCreate(ctx context.Context, d *schema.ResourceData, meta in input.ImageConfig = expandImageConfigs(v.([]interface{})) } + if v, ok := d.GetOk("logging_config"); ok && len(v.([]interface{})) > 0 { + input.LoggingConfig = expandLoggingConfig(v.([]interface{})) + } + if v, ok := d.GetOk("kms_key_arn"); ok { input.KMSKeyArn = aws.String(v.(string)) } @@ -645,6 +682,9 @@ func resourceFunctionRead(ctx context.Context, d *schema.ResourceData, meta inte if err := d.Set("layers", flattenLayers(function.Layers)); err != nil { return sdkdiag.AppendErrorf(diags, "setting layers: %s", err) } + if err := d.Set("logging_config", flattenLoggingConfig(function.LoggingConfig)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting logging_config: %s", err) + } d.Set("memory_size", function.MemorySize) d.Set("package_type", function.PackageType) if output.Concurrency != nil { @@ -828,6 +868,10 @@ func resourceFunctionUpdate(ctx context.Context, d *schema.ResourceData, meta in input.Layers = flex.ExpandStringValueList(d.Get("layers").([]interface{})) } + if d.HasChange("logging_config") { + input.LoggingConfig = expandLoggingConfig(d.Get("logging_config").([]interface{})) + } + if d.HasChange("memory_size") { input.MemorySize = aws.Int32(int32(d.Get("memory_size").(int))) } @@ -1251,6 +1295,7 @@ func needsFunctionConfigUpdate(d verify.ResourceDiffer) bool { d.HasChange("handler") || d.HasChange("file_system_config") || d.HasChange("image_config") || + d.HasChange("logging_config") || d.HasChange("memory_size") || d.HasChange("role") || d.HasChange("timeout") || @@ -1388,6 +1433,40 @@ func expandImageConfigs(imageConfigMaps []interface{}) *types.ImageConfig { return imageConfig } +func expandLoggingConfig(tfList []interface{}) *types.LoggingConfig { + loggingConfig := &types.LoggingConfig{} + if 
len(tfList) == 1 && tfList[0] != nil { + config := tfList[0].(map[string]interface{}) + if v := config["application_log_level"].(string); len(v) > 0 { + loggingConfig.ApplicationLogLevel = types.ApplicationLogLevel(v) + } + if v := config["log_format"].(string); len(v) > 0 { + loggingConfig.LogFormat = types.LogFormat(v) + } + if v := config["log_group"].(string); len(v) > 0 { + loggingConfig.LogGroup = aws.String(v) + } + if v := config["system_log_level"].(string); len(v) > 0 { + loggingConfig.SystemLogLevel = types.SystemLogLevel(v) + } + } + return loggingConfig +} + +func flattenLoggingConfig(apiObject *types.LoggingConfig) []map[string]interface{} { + if apiObject == nil { + return nil + } + m := map[string]interface{}{ + "application_log_level": string(apiObject.ApplicationLogLevel), + "log_format": string(apiObject.LogFormat), + "log_group": *apiObject.LogGroup, + "system_log_level": string(apiObject.SystemLogLevel), + } + + return []map[string]interface{}{m} +} + func flattenEphemeralStorage(response *types.EphemeralStorage) []map[string]interface{} { if response == nil { return nil diff --git a/internal/service/lambda/function_data_source.go b/internal/service/lambda/function_data_source.go index bd7e1377e1c..710932365fe 100644 --- a/internal/service/lambda/function_data_source.go +++ b/internal/service/lambda/function_data_source.go @@ -124,6 +124,30 @@ func DataSourceFunction() *schema.Resource { Computed: true, Elem: &schema.Schema{Type: schema.TypeString}, }, + "logging_config": { + Type: schema.TypeList, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "application_log_level": { + Type: schema.TypeString, + Computed: true, + }, + "log_format": { + Type: schema.TypeString, + Computed: true, + }, + "log_group": { + Type: schema.TypeString, + Computed: true, + }, + "system_log_level": { + Type: schema.TypeString, + Computed: true, + }, + }, + }, + }, "memory_size": { Type: schema.TypeInt, Computed: true, @@ -295,6 
+319,9 @@ func dataSourceFunctionRead(ctx context.Context, d *schema.ResourceData, meta in if err := d.Set("layers", flattenLayers(function.Layers)); err != nil { return sdkdiag.AppendErrorf(diags, "setting layers: %s", err) } + if err := d.Set("logging_config", flattenLoggingConfig(function.LoggingConfig)); err != nil { + return sdkdiag.AppendErrorf(diags, "setting logging_config: %s", err) + } d.Set("memory_size", function.MemorySize) d.Set("qualified_arn", qualifiedARN) d.Set("qualified_invoke_arn", functionInvokeARN(qualifiedARN, meta)) diff --git a/internal/service/lambda/function_data_source_test.go b/internal/service/lambda/function_data_source_test.go index 8ca95dfc1f3..2dc54557838 100644 --- a/internal/service/lambda/function_data_source_test.go +++ b/internal/service/lambda/function_data_source_test.go @@ -38,6 +38,11 @@ func TestAccLambdaFunctionDataSource_basic(t *testing.T) { resource.TestCheckResourceAttrPair(dataSourceName, "handler", resourceName, "handler"), resource.TestCheckResourceAttrPair(dataSourceName, "invoke_arn", resourceName, "invoke_arn"), resource.TestCheckResourceAttrPair(dataSourceName, "last_modified", resourceName, "last_modified"), + resource.TestCheckResourceAttrPair(dataSourceName, "logging_config.#", resourceName, "logging_config.#"), + resource.TestCheckResourceAttrPair(dataSourceName, "logging_config.0.application_log_level", resourceName, "logging_config.0.application_log_level"), + resource.TestCheckResourceAttrPair(dataSourceName, "logging_config.0.log_format", resourceName, "logging_config.0.log_format"), + resource.TestCheckResourceAttrPair(dataSourceName, "logging_config.0.log_group", resourceName, "logging_config.0.log_group"), + resource.TestCheckResourceAttrPair(dataSourceName, "logging_config.0.system_log_level", resourceName, "logging_config.0.system_log_level"), resource.TestCheckResourceAttrPair(dataSourceName, "memory_size", resourceName, "memory_size"), resource.TestCheckResourceAttrPair(dataSourceName, 
"qualified_arn", resourceName, "qualified_arn"), resource.TestCheckResourceAttrPair(dataSourceName, "qualified_invoke_arn", resourceName, "qualified_invoke_arn"), @@ -333,6 +338,37 @@ func TestAccLambdaFunctionDataSource_ephemeralStorage(t *testing.T) { }) } +func TestAccLambdaFunctionDataSource_loggingConfig(t *testing.T) { + ctx := acctest.Context(t) + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + dataSourceName := "data.aws_lambda_function.test" + resourceName := "aws_lambda_function.test" + checkFunc := resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttrPair(dataSourceName, "arn", resourceName, "arn"), + resource.TestCheckResourceAttrPair(dataSourceName, "logging_config.#", resourceName, "logging_config.#"), + resource.TestCheckResourceAttrPair(dataSourceName, "logging_config.0.application_log_level", resourceName, "logging_config.0.application_log_level"), + resource.TestCheckResourceAttrPair(dataSourceName, "logging_config.0.log_format", resourceName, "logging_config.0.log_format"), + resource.TestCheckResourceAttrPair(dataSourceName, "logging_config.0.log_group", resourceName, "logging_config.0.log_group"), + resource.TestCheckResourceAttrPair(dataSourceName, "logging_config.0.system_log_level", resourceName, "logging_config.0.system_log_level"), + ) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, lambda.EndpointsID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + Steps: []resource.TestStep{ + { + Config: testAccFunctionDataSourceConfig_loggingConfigStructured(rName), + Check: checkFunc, + }, + { + Config: testAccFunctionDataSourceConfig_loggingConfigText(rName), + Check: checkFunc, + }, + }, + }) +} + func testAccImageLatestPreCheck(t *testing.T) { if os.Getenv("AWS_LAMBDA_IMAGE_LATEST_ID") == "" { t.Skip("AWS_LAMBDA_IMAGE_LATEST_ID env var must be set for Lambda Function Data Source Image Support acceptance tests.") @@ 
-723,3 +759,46 @@ data "aws_lambda_function" "test" { } `, rName)) } + +func testAccFunctionDataSourceConfig_loggingConfigStructured(rName string) string { + return acctest.ConfigCompose(testAccFunctionDataSourceConfig_base(rName), fmt.Sprintf(` +resource "aws_lambda_function" "test" { + filename = "test-fixtures/lambdatest.zip" + function_name = %[1]q + handler = "exports.example" + role = aws_iam_role.lambda.arn + runtime = "nodejs16.x" + + logging_config { + log_format = "JSON" + application_log_level = "DEBUG" + system_log_level = "WARN" + } +} + +data "aws_lambda_function" "test" { + function_name = aws_lambda_function.test.function_name +} +`, rName)) +} + +func testAccFunctionDataSourceConfig_loggingConfigText(rName string) string { + return acctest.ConfigCompose(testAccFunctionDataSourceConfig_base(rName), fmt.Sprintf(` +resource "aws_lambda_function" "test" { + filename = "test-fixtures/lambdatest.zip" + function_name = %[1]q + handler = "exports.example" + role = aws_iam_role.lambda.arn + runtime = "nodejs16.x" + + logging_config { + log_format = "Text" + log_group = %[2]q + } +} + +data "aws_lambda_function" "test" { + function_name = aws_lambda_function.test.function_name +} +`, rName, rName+"_custom")) +} diff --git a/internal/service/lambda/function_test.go b/internal/service/lambda/function_test.go index b7123d11e0d..8794dd57e92 100644 --- a/internal/service/lambda/function_test.go +++ b/internal/service/lambda/function_test.go @@ -69,6 +69,11 @@ func TestAccLambdaFunction_basic(t *testing.T) { acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "lambda", fmt.Sprintf("function:%s", funcName)), resource.TestCheckResourceAttr(resourceName, "ephemeral_storage.#", "1"), resource.TestCheckResourceAttr(resourceName, "ephemeral_storage.0.size", "512"), + resource.TestCheckResourceAttr(resourceName, "logging_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "logging_config.0.application_log_level", ""), + 
resource.TestCheckResourceAttr(resourceName, "logging_config.0.log_format", "Text"), + resource.TestCheckResourceAttr(resourceName, "logging_config.0.log_group", fmt.Sprintf("/aws/lambda/%s", funcName)), + resource.TestCheckResourceAttr(resourceName, "logging_config.0.system_log_level", ""), resource.TestCheckResourceAttr(resourceName, "package_type", string(types.PackageTypeZip)), acctest.CheckResourceAttrRegionalARN(resourceName, "qualified_arn", "lambda", fmt.Sprintf("function:%s:%s", funcName, tflambda.FunctionVersionLatest)), resource.TestCheckResourceAttr(resourceName, "reserved_concurrent_executions", "-1"), @@ -1235,6 +1240,50 @@ func TestAccLambdaFunction_ephemeralStorage(t *testing.T) { }) } +func TestAccLambdaFunction_loggingConfig(t *testing.T) { + ctx := acctest.Context(t) + var conf lambda.GetFunctionOutput + rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) + resourceName := "aws_lambda_function.test" + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(ctx, t) }, + ErrorCheck: acctest.ErrorCheck(t, names.LambdaEndpointID), + ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, + CheckDestroy: testAccCheckFunctionDestroy(ctx), + + Steps: []resource.TestStep{ + { + Config: testAccFunctionConfig_loggingConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFunctionExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "logging_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "logging_config.0.application_log_level", ""), + resource.TestCheckResourceAttr(resourceName, "logging_config.0.log_format", "Text"), + resource.TestCheckResourceAttr(resourceName, "logging_config.0.log_group", fmt.Sprintf("/aws/lambda/%s_custom", rName)), + resource.TestCheckResourceAttr(resourceName, "logging_config.0.system_log_level", ""), + ), + }, + { + ResourceName: resourceName, + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: 
[]string{"filename", "publish"}, + }, + { + Config: testAccFunctionConfig_updateLoggingConfig(rName), + Check: resource.ComposeTestCheckFunc( + testAccCheckFunctionExists(ctx, resourceName, &conf), + resource.TestCheckResourceAttr(resourceName, "logging_config.#", "1"), + resource.TestCheckResourceAttr(resourceName, "logging_config.0.application_log_level", "TRACE"), + resource.TestCheckResourceAttr(resourceName, "logging_config.0.log_format", "JSON"), + resource.TestCheckResourceAttr(resourceName, "logging_config.0.system_log_level", "DEBUG"), + ), + }, + }, + }) +} + func TestAccLambdaFunction_tracing(t *testing.T) { ctx := acctest.Context(t) if testing.Short() { @@ -3351,6 +3400,45 @@ resource "aws_lambda_function" "test" { `, rName)) } +func testAccFunctionConfig_loggingConfig(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigLambdaBase(rName, rName, rName), + fmt.Sprintf(` +resource "aws_lambda_function" "test" { + filename = "test-fixtures/lambdatest.zip" + function_name = %[1]q + role = aws_iam_role.iam_for_lambda.arn + handler = "exports.example" + runtime = "nodejs16.x" + + logging_config { + log_format = "Text" + log_group = %[2]q + } +} +`, rName, fmt.Sprintf("/aws/lambda/%s_custom", rName))) +} + +func testAccFunctionConfig_updateLoggingConfig(rName string) string { + return acctest.ConfigCompose( + acctest.ConfigLambdaBase(rName, rName, rName), + fmt.Sprintf(` +resource "aws_lambda_function" "test" { + filename = "test-fixtures/lambdatest.zip" + function_name = %[1]q + role = aws_iam_role.iam_for_lambda.arn + handler = "exports.example" + runtime = "nodejs16.x" + + logging_config { + application_log_level = "TRACE" + log_format = "JSON" + system_log_level = "DEBUG" + } +} +`, rName)) +} + func testAccFunctionConfig_tracing(rName string) string { return acctest.ConfigCompose( acctest.ConfigLambdaBase(rName, rName, rName), diff --git a/internal/service/lambda/validate.go b/internal/service/lambda/validate.go index 
2c0a0edaca3..fba7291ffeb 100644 --- a/internal/service/lambda/validate.go +++ b/internal/service/lambda/validate.go @@ -47,3 +47,12 @@ func validPolicyStatementID() schema.SchemaValidateFunc { validation.StringLenBetween(1, 100), ) } + +func validLogGroupName() schema.SchemaValidateFunc { + // http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_CreateLogGroup.html + return validation.All( + validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z_./#-]+$`), "must contain alphanumeric characters, underscores,"+ + " hyphens, slashes, hash signs and dots only"), + validation.StringLenBetween(1, 512), + ) +} From bdd04acc5a73e95fd0680ce3db3112dd7422ddd3 Mon Sep 17 00:00:00 2001 From: JamesJJ Date: Sat, 23 Dec 2023 00:40:27 +0800 Subject: [PATCH 408/438] Add support for logging_config in `aws_lambda_function` resource (docs) Signed-off-by: JamesJJ --- .changelog/35050.txt | 3 +++ website/docs/d/lambda_function.html.markdown | 1 + website/docs/r/lambda_function.html.markdown | 16 ++++++++++++++++ 3 files changed, 20 insertions(+) create mode 100644 .changelog/35050.txt diff --git a/.changelog/35050.txt b/.changelog/35050.txt new file mode 100644 index 00000000000..5f6376645ec --- /dev/null +++ b/.changelog/35050.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_lambda_function: Add `logging_config` configuration block in support of [advanced logging controls](https://docs.aws.amazon.com/lambda/latest/dg/monitoring-cloudwatchlogs.html#monitoring-cloudwatchlogs-advanced) +``` diff --git a/website/docs/d/lambda_function.html.markdown b/website/docs/d/lambda_function.html.markdown index 090b14bef67..45f9f8bec18 100644 --- a/website/docs/d/lambda_function.html.markdown +++ b/website/docs/d/lambda_function.html.markdown @@ -47,6 +47,7 @@ This data source exports the following attributes in addition to the arguments a * `kms_key_arn` - ARN for the KMS encryption key. * `last_modified` - Date this resource was last modified. 
* `layers` - List of Lambda Layer ARNs attached to your Lambda Function. +* `logging_config` - Advanced logging settings. * `memory_size` - Amount of memory in MB your Lambda Function can use at runtime. * `qualified_arn` - Qualified (`:QUALIFIER` or `:VERSION` suffix) ARN identifying your Lambda Function. See also `arn`. * `qualified_invoke_arn` - Qualified (`:QUALIFIER` or `:VERSION` suffix) ARN to be used for invoking Lambda Function from API Gateway. See also `invoke_arn`. diff --git a/website/docs/r/lambda_function.html.markdown b/website/docs/r/lambda_function.html.markdown index c4cbcca76ad..ef55d7b8437 100644 --- a/website/docs/r/lambda_function.html.markdown +++ b/website/docs/r/lambda_function.html.markdown @@ -198,6 +198,11 @@ variable "lambda_function_name" { resource "aws_lambda_function" "test_lambda" { function_name = var.lambda_function_name + # Advanced logging controls (optional) + logging_config { + log_format = "Text" + } + # ... other configuration ... depends_on = [ aws_iam_role_policy_attachment.lambda_logs, @@ -270,6 +275,7 @@ The following arguments are optional: * `image_uri` - (Optional) ECR image URI containing the function's deployment package. Exactly one of `filename`, `image_uri`, or `s3_bucket` must be specified. * `kms_key_arn` - (Optional) Amazon Resource Name (ARN) of the AWS Key Management Service (KMS) key that is used to encrypt environment variables. If this configuration is not provided when environment variables are in use, AWS Lambda uses a default service key. If this configuration is provided when environment variables are not in use, the AWS Lambda API does not save this configuration and Terraform will show a perpetual difference of adding the key. To fix the perpetual difference, remove this configuration. * `layers` - (Optional) List of Lambda Layer Version ARNs (maximum of 5) to attach to your Lambda Function. 
See [Lambda Layers][10] +* `logging_config` - (Optional) Configuration block used to specify advanced logging settings. Detailed below. * `memory_size` - (Optional) Amount of memory in MB your Lambda Function can use at runtime. Defaults to `128`. See [Limits][5] * `package_type` - (Optional) Lambda deployment package type. Valid values are `Zip` and `Image`. Defaults to `Zip`. * `publish` - (Optional) Whether to publish creation/change as new Lambda Function Version. Defaults to `false`. @@ -317,6 +323,15 @@ Container image configuration values that override the values in the container i * `entry_point` - (Optional) Entry point to your application, which is typically the location of the runtime executable. * `working_directory` - (Optional) Working directory. +### logging_config + +Advanced logging settings. See [Configuring advanced logging controls for your Lambda function][13]. + +* `application_log_level` - (Optional) for JSON structured logs, choose the detail level of the logs your application sends to CloudWatch when using supported logging libraries. +* `log_format` - (Required) select between `Text` and structured `JSON` format for your function's logs. +* `log_group` - (Optional) the CloudWatch log group your function sends logs to. +* `system_log_level` - (optional) for JSON structured logs, choose the detail level of the Lambda platform event logs sent to CloudWatch, such as `ERROR`, `DEBUG`, or `INFO`. + ### snap_start Snap start settings for low-latency startups. This feature is currently only supported for `java11` and `java17` runtimes. Remove this block to delete the associated settings (rather than setting `apply_on = "None"`). 
@@ -365,6 +380,7 @@ This resource exports the following attributes in addition to the arguments abov [10]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-layers.html [11]: https://learn.hashicorp.com/terraform/aws/lambda-api-gateway [12]: https://docs.aws.amazon.com/lambda/latest/dg/services-efs.html +[13]: https://docs.aws.amazon.com/lambda/latest/dg/monitoring-cloudwatchlogs.html#monitoring-cloudwatchlogs-advanced ## Timeouts From ffd78e529092e9d30c577f953f8a153da6663cf0 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 11:55:08 -0500 Subject: [PATCH 409/438] testAccCheckBucketAddObjectsWithLegalHold: Fix 'Content-MD5 OR x-amz-checksum- HTTP header is required for Put Object requests with Object Lock parameters'. --- internal/service/s3/bucket_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index 109a4c94781..6e7465a5e67 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -2557,6 +2557,7 @@ func testAccCheckBucketAddObjectsWithLegalHold(ctx context.Context, n string, ke for _, key := range keys { _, err := conn.PutObject(ctx, &s3.PutObjectInput{ Bucket: aws.String(rs.Primary.ID), + ChecksumAlgorithm: types.ChecksumAlgorithmCrc32, Key: aws.String(key), ObjectLockLegalHoldStatus: types.ObjectLockLegalHoldStatusOn, }) From 700d080f74606acd3d072bb14fae82b0c03116c2 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 11:57:46 -0500 Subject: [PATCH 410/438] Fix typo. --- internal/service/s3/bucket.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index a6192b45725..4791ff4b6d8 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -1509,7 +1509,7 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta inte // Bucket Server-side Encryption Configuration. 
// if d.HasChange("server_side_encryption_configuration") { - if v, ok := d.GetOk("replication_configuration"); !ok || len(v.([]interface{})) == 0 { + if v, ok := d.GetOk("server_side_encryption_configuration"); !ok || len(v.([]interface{})) == 0 { _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { return conn.DeleteBucketEncryption(ctx, &s3.DeleteBucketEncryptionInput{ Bucket: aws.String(d.Id()), From fe0188c93673020b029897c135fe2b967c019148 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Fri, 22 Dec 2023 17:20:04 +0000 Subject: [PATCH 411/438] Update CHANGELOG.md for #35038 --- CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 94066b450d6..9bb0ec87ae0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,7 @@ FEATURES: +* **New Data Source:** `aws_mq_broker_engine_types` ([#34232](https://github.com/hashicorp/terraform-provider-aws/issues/34232)) * **New Resource:** `aws_ssoadmin_application_access_scope` ([#34811](https://github.com/hashicorp/terraform-provider-aws/issues/34811)) ENHANCEMENTS: @@ -11,6 +12,8 @@ ENHANCEMENTS: * resource/aws_batch_compute_environment: Add `update_policy` parameter ([#34353](https://github.com/hashicorp/terraform-provider-aws/issues/34353)) * resource/aws_dms_replication_task: Allow `cdc_start_time` to use [RFC3339](https://www.rfc-editor.org/rfc/rfc3339) formatted dates in addition to UNIX timestamps ([#31917](https://github.com/hashicorp/terraform-provider-aws/issues/31917)) * resource/aws_dms_replication_task: Remove [ForceNew](https://developer.hashicorp.com/terraform/plugin/sdkv2/schemas/schema-behaviors#forcenew) from `replication_instance_arn`, allowing in-place migration between DMS instances ([#30721](https://github.com/hashicorp/terraform-provider-aws/issues/30721)) +* resource/aws_lambda_function: Add support for `python3.12` `runtime` value 
([#35049](https://github.com/hashicorp/terraform-provider-aws/issues/35049)) +* resource/aws_lambda_layer_version: Add support for `python3.12` `compatible_runtimes` value ([#35049](https://github.com/hashicorp/terraform-provider-aws/issues/35049)) * resource/aws_s3_bucket: Modify server-side encryption configuration error handling, enabling support for NetApp StorageGRID ([#34890](https://github.com/hashicorp/terraform-provider-aws/issues/34890)) BUG FIXES: From 5315df7e948e78b75b66b9bdec8400b49d1474bd Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 12:32:53 -0500 Subject: [PATCH 412/438] Fix 'expandBucketServerSideEncryptionRules'. --- internal/service/s3/bucket.go | 37 +++++++++++++++++++++++------------ 1 file changed, 24 insertions(+), 13 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 4791ff4b6d8..257c85bd757 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -2779,25 +2779,36 @@ func flattenBucketSSEKMSEncryptedObjects(objects *types.SseKmsEncryptedObjects) // func expandBucketServerSideEncryptionRules(l []interface{}) []types.ServerSideEncryptionRule { + if len(l) == 0 || l[0] == nil { + return nil + } + + tfMap, ok := l[0].(map[string]interface{}) + if !ok { + return nil + } + var rules []types.ServerSideEncryptionRule - for _, tfMapRaw := range l { - tfMap, ok := tfMapRaw.(map[string]interface{}) - if !ok { - continue - } + if l, ok := tfMap["rule"].([]interface{}); ok && len(l) > 0 { + for _, tfMapRaw := range l { + tfMap, ok := tfMapRaw.(map[string]interface{}) + if !ok { + continue + } - rule := types.ServerSideEncryptionRule{} + rule := types.ServerSideEncryptionRule{} - if v, ok := tfMap["apply_server_side_encryption_by_default"].([]interface{}); ok && len(v) > 0 && v[0] != nil { - rule.ApplyServerSideEncryptionByDefault = expandBucketServerSideEncryptionByDefault(v) - } + if v, ok := tfMap["apply_server_side_encryption_by_default"].([]interface{}); ok && 
len(v) > 0 && v[0] != nil { + rule.ApplyServerSideEncryptionByDefault = expandBucketServerSideEncryptionByDefault(v) + } - if v, ok := tfMap["bucket_key_enabled"].(bool); ok { - rule.BucketKeyEnabled = aws.Bool(v) - } + if v, ok := tfMap["bucket_key_enabled"].(bool); ok { + rule.BucketKeyEnabled = aws.Bool(v) + } - rules = append(rules, rule) + rules = append(rules, rule) + } } return rules From 6363b248e734b7fca91e1c5c2a91f0ff97e06e18 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 12:33:16 -0500 Subject: [PATCH 413/438] Acceptance test output: % make testacc TESTARGS='-run=TestAccS3Bucket_Basic' PKG=s3 ACCTEST_PARALLELISM=2 ==> Checking that code complies with gofmt requirements... TF_ACC=1 go test ./internal/service/s3/... -v -count 1 -parallel 2 -run=TestAccS3Bucket_Basic -timeout 360m === RUN TestAccS3Bucket_Basic_basic === PAUSE TestAccS3Bucket_Basic_basic === RUN TestAccS3Bucket_Basic_emptyString === PAUSE TestAccS3Bucket_Basic_emptyString === RUN TestAccS3Bucket_Basic_nameGenerated === PAUSE TestAccS3Bucket_Basic_nameGenerated === RUN TestAccS3Bucket_Basic_namePrefix === PAUSE TestAccS3Bucket_Basic_namePrefix === RUN TestAccS3Bucket_Basic_forceDestroy === PAUSE TestAccS3Bucket_Basic_forceDestroy === RUN TestAccS3Bucket_Basic_forceDestroyWithObjectVersions === PAUSE TestAccS3Bucket_Basic_forceDestroyWithObjectVersions === RUN TestAccS3Bucket_Basic_forceDestroyWithEmptyPrefixes === PAUSE TestAccS3Bucket_Basic_forceDestroyWithEmptyPrefixes === RUN TestAccS3Bucket_Basic_forceDestroyWithObjectLockEnabled === PAUSE TestAccS3Bucket_Basic_forceDestroyWithObjectLockEnabled === RUN TestAccS3Bucket_Basic_acceleration === PAUSE TestAccS3Bucket_Basic_acceleration === RUN TestAccS3Bucket_Basic_keyEnabled === PAUSE TestAccS3Bucket_Basic_keyEnabled === RUN TestAccS3Bucket_Basic_requestPayer === PAUSE TestAccS3Bucket_Basic_requestPayer === CONT TestAccS3Bucket_Basic_basic === CONT TestAccS3Bucket_Basic_forceDestroyWithEmptyPrefixes --- PASS: 
TestAccS3Bucket_Basic_forceDestroyWithEmptyPrefixes (22.21s) === CONT TestAccS3Bucket_Basic_namePrefix --- PASS: TestAccS3Bucket_Basic_basic (26.75s) === CONT TestAccS3Bucket_Basic_forceDestroyWithObjectVersions --- PASS: TestAccS3Bucket_Basic_namePrefix (25.25s) === CONT TestAccS3Bucket_Basic_forceDestroy --- PASS: TestAccS3Bucket_Basic_forceDestroyWithObjectVersions (25.54s) === CONT TestAccS3Bucket_Basic_nameGenerated --- PASS: TestAccS3Bucket_Basic_forceDestroy (20.70s) === CONT TestAccS3Bucket_Basic_emptyString --- PASS: TestAccS3Bucket_Basic_nameGenerated (25.31s) === CONT TestAccS3Bucket_Basic_keyEnabled --- PASS: TestAccS3Bucket_Basic_emptyString (24.56s) === CONT TestAccS3Bucket_Basic_requestPayer --- PASS: TestAccS3Bucket_Basic_keyEnabled (27.07s) === CONT TestAccS3Bucket_Basic_acceleration --- PASS: TestAccS3Bucket_Basic_requestPayer (42.40s) === CONT TestAccS3Bucket_Basic_forceDestroyWithObjectLockEnabled --- PASS: TestAccS3Bucket_Basic_acceleration (44.84s) --- PASS: TestAccS3Bucket_Basic_forceDestroyWithObjectLockEnabled (25.03s) PASS ok github.com/hashicorp/terraform-provider-aws/internal/service/s3 165.750s From 5f30a7fd0af454653b2d538364afea79f10da74d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 22 Dec 2023 17:46:30 +0000 Subject: [PATCH 414/438] build(deps): bump the aws-sdk-go group with 32 updates Bumps the aws-sdk-go group with 32 updates: | Package | From | To | | --- | --- | --- | | [github.com/aws/aws-sdk-go](https://github.com/aws/aws-sdk-go) | `1.49.5` | `1.49.8` | | [github.com/aws/aws-sdk-go-v2/config](https://github.com/aws/aws-sdk-go-v2) | `1.26.1` | `1.26.2` | | [github.com/aws/aws-sdk-go-v2/feature/s3/manager](https://github.com/aws/aws-sdk-go-v2) | `1.15.8` | `1.15.9` | | [github.com/aws/aws-sdk-go-v2/service/accessanalyzer](https://github.com/aws/aws-sdk-go-v2) | `1.26.5` | `1.26.6` | | [github.com/aws/aws-sdk-go-v2/service/acm](https://github.com/aws/aws-sdk-go-v2) 
| `1.22.5` | `1.22.6` | | [github.com/aws/aws-sdk-go-v2/service/appconfig](https://github.com/aws/aws-sdk-go-v2) | `1.26.5` | `1.26.6` | | [github.com/aws/aws-sdk-go-v2/service/cloudcontrol](https://github.com/aws/aws-sdk-go-v2) | `1.15.5` | `1.15.6` | | [github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs](https://github.com/aws/aws-sdk-go-v2) | `1.30.0` | `1.30.1` | | [github.com/aws/aws-sdk-go-v2/service/codedeploy](https://github.com/aws/aws-sdk-go-v2) | `1.22.1` | `1.22.2` | | [github.com/aws/aws-sdk-go-v2/service/directoryservice](https://github.com/aws/aws-sdk-go-v2) | `1.22.5` | `1.22.6` | | [github.com/aws/aws-sdk-go-v2/service/ec2](https://github.com/aws/aws-sdk-go-v2) | `1.141.0` | `1.142.0` | | [github.com/aws/aws-sdk-go-v2/service/ecr](https://github.com/aws/aws-sdk-go-v2) | `1.24.5` | `1.24.6` | | [github.com/aws/aws-sdk-go-v2/service/eks](https://github.com/aws/aws-sdk-go-v2) | `1.36.0` | `1.37.0` | | [github.com/aws/aws-sdk-go-v2/service/emr](https://github.com/aws/aws-sdk-go-v2) | `1.35.5` | `1.35.6` | | [github.com/aws/aws-sdk-go-v2/service/identitystore](https://github.com/aws/aws-sdk-go-v2) | `1.21.6` | `1.21.7` | | [github.com/aws/aws-sdk-go-v2/service/internetmonitor](https://github.com/aws/aws-sdk-go-v2) | `1.10.5` | `1.10.6` | | [github.com/aws/aws-sdk-go-v2/service/lambda](https://github.com/aws/aws-sdk-go-v2) | `1.49.5` | `1.49.6` | | [github.com/aws/aws-sdk-go-v2/service/mediaconnect](https://github.com/aws/aws-sdk-go-v2) | `1.24.5` | `1.24.6` | | [github.com/aws/aws-sdk-go-v2/service/medialive](https://github.com/aws/aws-sdk-go-v2) | `1.43.3` | `1.44.0` | | [github.com/aws/aws-sdk-go-v2/service/oam](https://github.com/aws/aws-sdk-go-v2) | `1.7.5` | `1.7.6` | | [github.com/aws/aws-sdk-go-v2/service/rbin](https://github.com/aws/aws-sdk-go-v2) | `1.14.3` | `1.14.4` | | [github.com/aws/aws-sdk-go-v2/service/rds](https://github.com/aws/aws-sdk-go-v2) | `1.64.6` | `1.66.0` | | 
[github.com/aws/aws-sdk-go-v2/service/resourcegroups](https://github.com/aws/aws-sdk-go-v2) | `1.19.5` | `1.19.6` | | [github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi](https://github.com/aws/aws-sdk-go-v2) | `1.19.5` | `1.19.6` | | [github.com/aws/aws-sdk-go-v2/service/s3control](https://github.com/aws/aws-sdk-go-v2) | `1.41.6` | `1.41.7` | | [github.com/aws/aws-sdk-go-v2/service/securityhub](https://github.com/aws/aws-sdk-go-v2) | `1.44.0` | `1.44.1` | | [github.com/aws/aws-sdk-go-v2/service/servicequotas](https://github.com/aws/aws-sdk-go-v2) | `1.19.5` | `1.19.6` | | [github.com/aws/aws-sdk-go-v2/service/sns](https://github.com/aws/aws-sdk-go-v2) | `1.26.5` | `1.26.6` | | [github.com/aws/aws-sdk-go-v2/service/sqs](https://github.com/aws/aws-sdk-go-v2) | `1.29.5` | `1.29.6` | | [github.com/aws/aws-sdk-go-v2/service/ssm](https://github.com/aws/aws-sdk-go-v2) | `1.44.5` | `1.44.6` | | [github.com/aws/aws-sdk-go-v2/service/swf](https://github.com/aws/aws-sdk-go-v2) | `1.20.5` | `1.20.6` | | [github.com/aws/aws-sdk-go-v2/service/xray](https://github.com/aws/aws-sdk-go-v2) | `1.23.5` | `1.23.6` | Updates `github.com/aws/aws-sdk-go` from 1.49.5 to 1.49.8 - [Release notes](https://github.com/aws/aws-sdk-go/releases) - [Commits](https://github.com/aws/aws-sdk-go/compare/v1.49.5...v1.49.8) Updates `github.com/aws/aws-sdk-go-v2/config` from 1.26.1 to 1.26.2 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.26.1...config/v1.26.2) Updates `github.com/aws/aws-sdk-go-v2/feature/s3/manager` from 1.15.8 to 1.15.9 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/config/v1.15.9/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.15.8...config/v1.15.9) Updates `github.com/aws/aws-sdk-go-v2/service/accessanalyzer` from 1.26.5 to 1.26.6 - [Release 
notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.26.5...service/s3/v1.26.6) Updates `github.com/aws/aws-sdk-go-v2/service/acm` from 1.22.5 to 1.22.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/pi/v1.22.5...service/pi/v1.22.6) Updates `github.com/aws/aws-sdk-go-v2/service/appconfig` from 1.26.5 to 1.26.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.26.5...service/s3/v1.26.6) Updates `github.com/aws/aws-sdk-go-v2/service/cloudcontrol` from 1.15.5 to 1.15.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/config/v1.15.5...config/v1.15.6) Updates `github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs` from 1.30.0 to 1.30.1 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.30.0...service/s3/v1.30.1) Updates `github.com/aws/aws-sdk-go-v2/service/codedeploy` from 1.22.1 to 1.22.2 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/v1.22.1...v1.22.2) Updates `github.com/aws/aws-sdk-go-v2/service/directoryservice` from 1.22.5 to 1.22.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/pi/v1.22.5...service/pi/v1.22.6) Updates `github.com/aws/aws-sdk-go-v2/service/ec2` from 1.141.0 to 1.142.0 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/ec2/v1.141.0...service/ec2/v1.142.0) Updates `github.com/aws/aws-sdk-go-v2/service/ecr` from 1.24.5 to 1.24.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - 
[Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/ecr/v1.24.5...service/ecr/v1.24.6) Updates `github.com/aws/aws-sdk-go-v2/service/eks` from 1.36.0 to 1.37.0 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/service/s3/v1.37.0/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.36.0...service/s3/v1.37.0) Updates `github.com/aws/aws-sdk-go-v2/service/emr` from 1.35.5 to 1.35.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/ecs/v1.35.5...service/ecs/v1.35.6) Updates `github.com/aws/aws-sdk-go-v2/service/identitystore` from 1.21.6 to 1.21.7 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/ebs/v1.21.6...service/efs/v1.21.7) Updates `github.com/aws/aws-sdk-go-v2/service/internetmonitor` from 1.10.5 to 1.10.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/m2/v1.10.5...service/rum/v1.10.6) Updates `github.com/aws/aws-sdk-go-v2/service/lambda` from 1.49.5 to 1.49.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/lambda/v1.49.5...service/lambda/v1.49.6) Updates `github.com/aws/aws-sdk-go-v2/service/mediaconnect` from 1.24.5 to 1.24.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/ecr/v1.24.5...service/ecr/v1.24.6) Updates `github.com/aws/aws-sdk-go-v2/service/medialive` from 1.43.3 to 1.44.0 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/rds/v1.43.3...service/s3/v1.44.0) Updates `github.com/aws/aws-sdk-go-v2/service/oam` from 1.7.5 to 1.7.6 - [Release 
notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/m2/v1.7.5...service/m2/v1.7.6) Updates `github.com/aws/aws-sdk-go-v2/service/rbin` from 1.14.3 to 1.14.4 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/service/mq/v1.14.4/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/mq/v1.14.3...service/mq/v1.14.4) Updates `github.com/aws/aws-sdk-go-v2/service/rds` from 1.64.6 to 1.66.0 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Changelog](https://github.com/aws/aws-sdk-go-v2/blob/service/ec2/v1.66.0/CHANGELOG.md) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/rds/v1.64.6...service/ec2/v1.66.0) Updates `github.com/aws/aws-sdk-go-v2/service/resourcegroups` from 1.19.5 to 1.19.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/mq/v1.19.5...service/efs/v1.19.6) Updates `github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi` from 1.19.5 to 1.19.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/mq/v1.19.5...service/efs/v1.19.6) Updates `github.com/aws/aws-sdk-go-v2/service/s3control` from 1.41.6 to 1.41.7 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3control/v1.41.6...service/s3control/v1.41.7) Updates `github.com/aws/aws-sdk-go-v2/service/securityhub` from 1.44.0 to 1.44.1 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.44.0...service/iot/v1.44.1) Updates `github.com/aws/aws-sdk-go-v2/service/servicequotas` from 1.19.5 to 1.19.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - 
[Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/mq/v1.19.5...service/efs/v1.19.6) Updates `github.com/aws/aws-sdk-go-v2/service/sns` from 1.26.5 to 1.26.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.26.5...service/s3/v1.26.6) Updates `github.com/aws/aws-sdk-go-v2/service/sqs` from 1.29.5 to 1.29.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/s3/v1.29.5...service/s3/v1.29.6) Updates `github.com/aws/aws-sdk-go-v2/service/ssm` from 1.44.5 to 1.44.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/ssm/v1.44.5...service/ssm/v1.44.6) Updates `github.com/aws/aws-sdk-go-v2/service/swf` from 1.20.5 to 1.20.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/service/mq/v1.20.5...service/mq/v1.20.6) Updates `github.com/aws/aws-sdk-go-v2/service/xray` from 1.23.5 to 1.23.6 - [Release notes](https://github.com/aws/aws-sdk-go-v2/releases) - [Commits](https://github.com/aws/aws-sdk-go-v2/compare/v1.23.5...service/ram/v1.23.6) --- updated-dependencies: - dependency-name: github.com/aws/aws-sdk-go dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/config dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/feature/s3/manager dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/accessanalyzer dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: 
github.com/aws/aws-sdk-go-v2/service/acm dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/appconfig dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/cloudcontrol dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/codedeploy dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/directoryservice dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/ec2 dependency-type: direct:production update-type: version-update:semver-minor dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/ecr dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/eks dependency-type: direct:production update-type: version-update:semver-minor dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/emr dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/identitystore dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/internetmonitor dependency-type: direct:production update-type: version-update:semver-patch 
dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/lambda dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/mediaconnect dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/medialive dependency-type: direct:production update-type: version-update:semver-minor dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/oam dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/rbin dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/rds dependency-type: direct:production update-type: version-update:semver-minor dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/resourcegroups dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/s3control dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/securityhub dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/servicequotas dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/sns dependency-type: 
direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/sqs dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/ssm dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/swf dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go - dependency-name: github.com/aws/aws-sdk-go-v2/service/xray dependency-type: direct:production update-type: version-update:semver-patch dependency-group: aws-sdk-go ... Signed-off-by: dependabot[bot] --- go.mod | 70 ++++++++++++++--------------- go.sum | 140 ++++++++++++++++++++++++++++----------------------------- 2 files changed, 105 insertions(+), 105 deletions(-) diff --git a/go.mod b/go.mod index 3291a8c8a45..12eb9ada778 100644 --- a/go.mod +++ b/go.mod @@ -5,15 +5,15 @@ go 1.20 require ( github.com/ProtonMail/go-crypto v0.0.0-20230923063757-afb1ddc0824c github.com/YakDriver/regexache v0.23.0 - github.com/aws/aws-sdk-go v1.49.5 + github.com/aws/aws-sdk-go v1.49.8 github.com/aws/aws-sdk-go-v2 v1.24.0 - github.com/aws/aws-sdk-go-v2/config v1.26.1 + github.com/aws/aws-sdk-go-v2/config v1.26.2 github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.8 - github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.26.5 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.9 + github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.26.6 github.com/aws/aws-sdk-go-v2/service/account v1.14.5 - github.com/aws/aws-sdk-go-v2/service/acm v1.22.5 - github.com/aws/aws-sdk-go-v2/service/appconfig v1.26.5 + github.com/aws/aws-sdk-go-v2/service/acm v1.22.6 + github.com/aws/aws-sdk-go-v2/service/appconfig v1.26.6 
github.com/aws/aws-sdk-go-v2/service/appfabric v1.5.5 github.com/aws/aws-sdk-go-v2/service/appflow v1.39.5 github.com/aws/aws-sdk-go-v2/service/apprunner v1.25.5 @@ -23,10 +23,10 @@ require ( github.com/aws/aws-sdk-go-v2/service/chimesdkmediapipelines v1.13.5 github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.12.5 github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.8.5 - github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.15.5 - github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.30.0 + github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.15.6 + github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.30.1 github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.10.5 - github.com/aws/aws-sdk-go-v2/service/codedeploy v1.22.1 + github.com/aws/aws-sdk-go-v2/service/codedeploy v1.22.2 github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.18.5 github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.21.5 github.com/aws/aws-sdk-go-v2/service/codestarnotifications v1.20.5 @@ -35,71 +35,71 @@ require ( github.com/aws/aws-sdk-go-v2/service/connectcases v1.12.5 github.com/aws/aws-sdk-go-v2/service/controltower v1.10.6 github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.34.5 - github.com/aws/aws-sdk-go-v2/service/directoryservice v1.22.5 + github.com/aws/aws-sdk-go-v2/service/directoryservice v1.22.6 github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.6.5 - github.com/aws/aws-sdk-go-v2/service/ec2 v1.141.0 - github.com/aws/aws-sdk-go-v2/service/ecr v1.24.5 - github.com/aws/aws-sdk-go-v2/service/eks v1.36.0 - github.com/aws/aws-sdk-go-v2/service/emr v1.35.5 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.142.0 + github.com/aws/aws-sdk-go-v2/service/ecr v1.24.6 + github.com/aws/aws-sdk-go-v2/service/eks v1.37.0 + github.com/aws/aws-sdk-go-v2/service/emr v1.35.6 github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.6 github.com/aws/aws-sdk-go-v2/service/evidently v1.16.5 github.com/aws/aws-sdk-go-v2/service/finspace v1.20.0 github.com/aws/aws-sdk-go-v2/service/fis 
v1.21.5 github.com/aws/aws-sdk-go-v2/service/glacier v1.19.5 github.com/aws/aws-sdk-go-v2/service/healthlake v1.20.5 - github.com/aws/aws-sdk-go-v2/service/identitystore v1.21.6 + github.com/aws/aws-sdk-go-v2/service/identitystore v1.21.7 github.com/aws/aws-sdk-go-v2/service/inspector2 v1.20.5 - github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.10.5 + github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.10.6 github.com/aws/aws-sdk-go-v2/service/ivschat v1.10.5 github.com/aws/aws-sdk-go-v2/service/kafka v1.28.5 github.com/aws/aws-sdk-go-v2/service/kendra v1.47.5 github.com/aws/aws-sdk-go-v2/service/keyspaces v1.7.5 - github.com/aws/aws-sdk-go-v2/service/lambda v1.49.5 + github.com/aws/aws-sdk-go-v2/service/lambda v1.49.6 github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.38.5 github.com/aws/aws-sdk-go-v2/service/lightsail v1.32.5 github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.25.5 - github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.24.5 - github.com/aws/aws-sdk-go-v2/service/medialive v1.43.3 + github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.24.6 + github.com/aws/aws-sdk-go-v2/service/medialive v1.44.0 github.com/aws/aws-sdk-go-v2/service/mediapackage v1.28.5 github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.7.5 github.com/aws/aws-sdk-go-v2/service/mq v1.20.6 - github.com/aws/aws-sdk-go-v2/service/oam v1.7.5 + github.com/aws/aws-sdk-go-v2/service/oam v1.7.6 github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.9.5 github.com/aws/aws-sdk-go-v2/service/osis v1.6.5 github.com/aws/aws-sdk-go-v2/service/pipes v1.9.6 github.com/aws/aws-sdk-go-v2/service/polly v1.36.5 github.com/aws/aws-sdk-go-v2/service/pricing v1.24.5 github.com/aws/aws-sdk-go-v2/service/qldb v1.19.5 - github.com/aws/aws-sdk-go-v2/service/rbin v1.14.3 - github.com/aws/aws-sdk-go-v2/service/rds v1.64.6 + github.com/aws/aws-sdk-go-v2/service/rbin v1.14.4 + github.com/aws/aws-sdk-go-v2/service/rds v1.66.0 github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.23.5 
github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.8.5 - github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.19.5 - github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.19.5 + github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.19.6 + github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.19.6 github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.6.6 github.com/aws/aws-sdk-go-v2/service/route53domains v1.20.5 - github.com/aws/aws-sdk-go-v2/service/s3 v1.47.6 - github.com/aws/aws-sdk-go-v2/service/s3control v1.41.6 + github.com/aws/aws-sdk-go-v2/service/s3 v1.47.7 + github.com/aws/aws-sdk-go-v2/service/s3control v1.41.7 github.com/aws/aws-sdk-go-v2/service/scheduler v1.6.5 - github.com/aws/aws-sdk-go-v2/service/securityhub v1.44.0 + github.com/aws/aws-sdk-go-v2/service/securityhub v1.44.1 github.com/aws/aws-sdk-go-v2/service/securitylake v1.10.5 - github.com/aws/aws-sdk-go-v2/service/servicequotas v1.19.5 + github.com/aws/aws-sdk-go-v2/service/servicequotas v1.19.6 github.com/aws/aws-sdk-go-v2/service/sesv2 v1.24.5 github.com/aws/aws-sdk-go-v2/service/signer v1.19.6 - github.com/aws/aws-sdk-go-v2/service/sns v1.26.5 - github.com/aws/aws-sdk-go-v2/service/sqs v1.29.5 - github.com/aws/aws-sdk-go-v2/service/ssm v1.44.5 + github.com/aws/aws-sdk-go-v2/service/sns v1.26.6 + github.com/aws/aws-sdk-go-v2/service/sqs v1.29.6 + github.com/aws/aws-sdk-go-v2/service/ssm v1.44.6 github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.20.5 github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.27.5 github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.23.5 - github.com/aws/aws-sdk-go-v2/service/sts v1.26.5 - github.com/aws/aws-sdk-go-v2/service/swf v1.20.5 + github.com/aws/aws-sdk-go-v2/service/sts v1.26.6 + github.com/aws/aws-sdk-go-v2/service/swf v1.20.6 github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.23.6 github.com/aws/aws-sdk-go-v2/service/transcribe v1.34.5 github.com/aws/aws-sdk-go-v2/service/verifiedpermissions v1.8.3 
github.com/aws/aws-sdk-go-v2/service/vpclattice v1.5.5 github.com/aws/aws-sdk-go-v2/service/workspaces v1.35.6 - github.com/aws/aws-sdk-go-v2/service/xray v1.23.5 + github.com/aws/aws-sdk-go-v2/service/xray v1.23.6 github.com/beevik/etree v1.2.0 github.com/davecgh/go-spew v1.1.1 github.com/gertd/go-pluralize v0.2.1 @@ -148,7 +148,7 @@ require ( github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/armon/go-radix v1.0.0 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 // indirect - github.com/aws/aws-sdk-go-v2/credentials v1.16.12 // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.16.13 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9 // indirect github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 // indirect diff --git a/go.sum b/go.sum index 3206f77c796..cc06b8535e4 100644 --- a/go.sum +++ b/go.sum @@ -21,20 +21,20 @@ github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmms github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aws/aws-sdk-go v1.49.5 h1:y2yfBlwjPDi3/sBVKeznYEdDy6wIhjA2L5NCBMLUIYA= -github.com/aws/aws-sdk-go v1.49.5/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.49.8 h1:gKgEiyJ8CPnr4r6pS06WfNXvp6z34JER1pBIwuocvVA= +github.com/aws/aws-sdk-go v1.49.8/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= github.com/aws/aws-sdk-go-v2 v1.24.0 h1:890+mqQ+hTpNuw0gGP6/4akolQkSToDJgHfQE7AwGuk= github.com/aws/aws-sdk-go-v2 v1.24.0/go.mod h1:LNh45Br1YAkEKaAqvmE1m8FUx6a5b/V0oAKV7of29b4= github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4 h1:OCs21ST2LrepDfD3lwlQiOqIGp6JiEUqG84GzTDoyJs= 
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.5.4/go.mod h1:usURWEKSNNAcAZuzRn/9ZYPT8aZQkR7xcCtunK/LkJo= -github.com/aws/aws-sdk-go-v2/config v1.26.1 h1:z6DqMxclFGL3Zfo+4Q0rLnAZ6yVkzCRxhRMsiRQnD1o= -github.com/aws/aws-sdk-go-v2/config v1.26.1/go.mod h1:ZB+CuKHRbb5v5F0oJtGdhFTelmrxd4iWO1lf0rQwSAg= -github.com/aws/aws-sdk-go-v2/credentials v1.16.12 h1:v/WgB8NxprNvr5inKIiVVrXPuuTegM+K8nncFkr1usU= -github.com/aws/aws-sdk-go-v2/credentials v1.16.12/go.mod h1:X21k0FjEJe+/pauud82HYiQbEr9jRKY3kXEIQ4hXeTQ= +github.com/aws/aws-sdk-go-v2/config v1.26.2 h1:+RWLEIWQIGgrz2pBPAUoGgNGs1TOyF4Hml7hCnYj2jc= +github.com/aws/aws-sdk-go-v2/config v1.26.2/go.mod h1:l6xqvUxt0Oj7PI/SUXYLNyZ9T/yBPn3YTQcJLLOdtR8= +github.com/aws/aws-sdk-go-v2/credentials v1.16.13 h1:WLABQ4Cp4vXtXfOWOS3MEZKr6AAYUpMczLhgKtAjQ/8= +github.com/aws/aws-sdk-go-v2/credentials v1.16.13/go.mod h1:Qg6x82FXwW0sJHzYruxGiuApNo31UEtJvXVSZAXeWiw= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 h1:w98BT5w+ao1/r5sUuiH6JkVzjowOKeOJRHERyy1vh58= github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10/go.mod h1:K2WGI7vUvkIv1HoNbfBA1bvIZ+9kL3YVmWxeKuLQsiw= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.8 h1:7wCngExMTAW2Bjf0Y92uWap6ZUcenLLWI5T3VJiQneU= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.8/go.mod h1:XVrAWYYM4ZRwOCOuLoUiao5hbLqNutEdqwCR3ZvkXgc= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.9 h1:5zA8qVCXMPGt6YneFnll5B157SfdK2SewU85PH9/yM0= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.15.9/go.mod h1:t4gy210hPxkbtYM8xOzrWdxVq1PyekR76OOKXy3s0Vs= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9 h1:v+HbZaCGmOwnTTVS86Fleq0vPzOd7tnJGbFhP0stNLs= github.com/aws/aws-sdk-go-v2/internal/configsources v1.2.9/go.mod h1:Xjqy+Nyj7VDLBtCMkQYOw1QYfAEZCVLrfI0ezve8wd4= github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.5.9 h1:N94sVhRACtXyVcjXxrwK1SKFIJrA9pOJ5yu2eSHnmls= @@ -43,14 +43,14 @@ github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsM 
github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9 h1:ugD6qzjYtB7zM5PN/ZIeaAIyefPaD82G8+SJopgvUpw= github.com/aws/aws-sdk-go-v2/internal/v4a v1.2.9/go.mod h1:YD0aYBWCrPENpHolhKw2XDlTIWae2GKXT1T4o6N6hiM= -github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.26.5 h1:UdqJHYgBmOYhVA1ixaECd4MTS7EoqWdDeP87YuDauB0= -github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.26.5/go.mod h1:grUsKCGlUQ80qedCiWN8LMlqmm97v81jr/sM1GXBjfg= +github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.26.6 h1:a4VYKmISU3AgNiGKbd5rTr1oLLRw26KPUAYxyv4G1f8= +github.com/aws/aws-sdk-go-v2/service/accessanalyzer v1.26.6/go.mod h1:grUsKCGlUQ80qedCiWN8LMlqmm97v81jr/sM1GXBjfg= github.com/aws/aws-sdk-go-v2/service/account v1.14.5 h1:sAXBYGqq4J/cPrtBrzXbEOSiYToW69qVF7heXDzcGKE= github.com/aws/aws-sdk-go-v2/service/account v1.14.5/go.mod h1:fvSp4SHBg07Gig7K7mEsO1XUK1jnT+BZRg6oWiOMigY= -github.com/aws/aws-sdk-go-v2/service/acm v1.22.5 h1:GNTWQH4PWazAsb3VXePxGKwzi7OiU8AedMajRJoQEQ8= -github.com/aws/aws-sdk-go-v2/service/acm v1.22.5/go.mod h1:yAwtFXtwrusYjymwgH4ofDG3by5KZvoBt8m87zYzotY= -github.com/aws/aws-sdk-go-v2/service/appconfig v1.26.5 h1:voFN9YKZU3UDxLpp+5vZ8IUXELHNrDx1nV1kH2TQbyg= -github.com/aws/aws-sdk-go-v2/service/appconfig v1.26.5/go.mod h1:HFw+8y3iu+08dKbz+IhHyGcZsLx9cq/NrMmL8rOdD0U= +github.com/aws/aws-sdk-go-v2/service/acm v1.22.6 h1:eaVGp4Vurey4qT7Fly4LKlBmuY5o357n/jmtL9VIc4M= +github.com/aws/aws-sdk-go-v2/service/acm v1.22.6/go.mod h1:yAwtFXtwrusYjymwgH4ofDG3by5KZvoBt8m87zYzotY= +github.com/aws/aws-sdk-go-v2/service/appconfig v1.26.6 h1:4sbjf1gecQ2e6bOXbHc1L5AFve3twLqz/9JjiPwx7bw= +github.com/aws/aws-sdk-go-v2/service/appconfig v1.26.6/go.mod h1:HFw+8y3iu+08dKbz+IhHyGcZsLx9cq/NrMmL8rOdD0U= github.com/aws/aws-sdk-go-v2/service/appfabric v1.5.5 h1:Geh29to5AKWZfFYToHQ4UxNrwvtyLN4ZoNhN+qMldIg= github.com/aws/aws-sdk-go-v2/service/appfabric v1.5.5/go.mod 
h1:ET7VpGqYxeyYtA7JmNZQ3+YPZ+lz98P3OvghaTdwaFE= github.com/aws/aws-sdk-go-v2/service/appflow v1.39.5 h1:zLBG7nAE9TG2WBpHjhMRNOdRDEQ9ylI7Jb865MKv+KE= @@ -69,14 +69,14 @@ github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.12.5 h1:Th+kzme/nRjTdxBhvy github.com/aws/aws-sdk-go-v2/service/chimesdkvoice v1.12.5/go.mod h1:9ETC6GsMWygbmUdP8IkjSgXNqy8pwFEgH/eehoDwYMU= github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.8.5 h1:HH9fmVqF71UES7ES8+vAnJ7/3igo5rJp1BtgScHAdHs= github.com/aws/aws-sdk-go-v2/service/cleanrooms v1.8.5/go.mod h1:6nxVpS0JBdSwXDm+vo+Hwz/CJn03vu6HexNB7bQSv3Y= -github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.15.5 h1:9aS9PZ/cnTVjWDIOVqgxKd+cRxP9W1MYrQhXwh/vBec= -github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.15.5/go.mod h1:21V6X5ZV37Oel5VQZRZtxMj6jeqQr6sMbhuWu9oTaH0= -github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.30.0 h1:CMZz/TJgt+GMKRxjuedxhMFs45GPhyst/a/7Q3DuAg4= -github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.30.0/go.mod h1:4Oeb7n2r/ApBIHphQkprve380p/RpPWBotumd44EDGg= +github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.15.6 h1:IwaYLYSyYZsVUkn0ux76E/5+5wAIVjFkW44LRNvmMjk= +github.com/aws/aws-sdk-go-v2/service/cloudcontrol v1.15.6/go.mod h1:21V6X5ZV37Oel5VQZRZtxMj6jeqQr6sMbhuWu9oTaH0= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.30.1 h1:ZMgx58Tqyr8kTSR9zLzX+W933ujDYleOtFedvn0xHg8= +github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs v1.30.1/go.mod h1:4Oeb7n2r/ApBIHphQkprve380p/RpPWBotumd44EDGg= github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.10.5 h1:52hjOAJdIm0P2MWM14J7aLKtcT8SItEtdluW+5LbWSo= github.com/aws/aws-sdk-go-v2/service/codecatalyst v1.10.5/go.mod h1:8GW1bxNLHWPRwtpJKNn8z0h2N6nKgoAsN4CjeAMIrLA= -github.com/aws/aws-sdk-go-v2/service/codedeploy v1.22.1 h1:cyRoT4yeLGEQk8ad4Se82INAA8Xcu6xr1grQ684GYnQ= -github.com/aws/aws-sdk-go-v2/service/codedeploy v1.22.1/go.mod h1:RiusqJl55/p7S8LNMh2J3ZsDHDqxRiPdsfIaZRKeEUo= +github.com/aws/aws-sdk-go-v2/service/codedeploy v1.22.2 
h1:3b8fwDhM0bJoOVglvM1w4665Ry6mrb3Rp+AH8iGI6qg= +github.com/aws/aws-sdk-go-v2/service/codedeploy v1.22.2/go.mod h1:RiusqJl55/p7S8LNMh2J3ZsDHDqxRiPdsfIaZRKeEUo= github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.18.5 h1:Jw0fM7521qn4edNQKiq3KUwdxY1c3iPnnOBgIzUoXz4= github.com/aws/aws-sdk-go-v2/service/codeguruprofiler v1.18.5/go.mod h1:RDeY2hgSGG8yoZBaBH8I9h89Wz7BVhnVRtaNaKQRELM= github.com/aws/aws-sdk-go-v2/service/codestarconnections v1.21.5 h1:AlmfzS3CBH1OMXjFU8sy7JMa1xIPC1n0Ke4zvOaEHRo= @@ -93,20 +93,20 @@ github.com/aws/aws-sdk-go-v2/service/controltower v1.10.6 h1:Sb6qOCo2oD9iGJ+0gyC github.com/aws/aws-sdk-go-v2/service/controltower v1.10.6/go.mod h1:HIRn9vSg38bhAI8BlxIWXl/i8qPruJzon9kPOeD31Ng= github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.34.5 h1:a//AdeswzibpC4fkkB1X4Ql/4iWZKGyYV0lWNTRDp1w= github.com/aws/aws-sdk-go-v2/service/customerprofiles v1.34.5/go.mod h1:Dst4mNfdyggL9PHmkYdSiVgJvwhfboruXtzQZpy46Xs= -github.com/aws/aws-sdk-go-v2/service/directoryservice v1.22.5 h1:i/7aXIrjTdVZtch90MSQ3EC03dh5XgTmJtbAqFtzysk= -github.com/aws/aws-sdk-go-v2/service/directoryservice v1.22.5/go.mod h1:KTFSRANgKK34D1LNNtOkPLWVgjhbx172XAQ1cDkP+08= +github.com/aws/aws-sdk-go-v2/service/directoryservice v1.22.6 h1:TJ1ZtV57GYfVGlrFLthjBF1NfjVmvWz1jMl9ndx06o8= +github.com/aws/aws-sdk-go-v2/service/directoryservice v1.22.6/go.mod h1:KTFSRANgKK34D1LNNtOkPLWVgjhbx172XAQ1cDkP+08= github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.6.5 h1:ikZu83oYYnSdtc73OP1HCBXuSxQ9AXDEebHhgnTpGDA= github.com/aws/aws-sdk-go-v2/service/docdbelastic v1.6.5/go.mod h1:XEY63kzpXT3wMrE6yBqWCY+K1bq5Fixq32eCZYFhwpA= github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.6 h1:kSdpnPOZL9NG5QHoKL5rTsdY+J+77hr+vqVMsPeyNe0= github.com/aws/aws-sdk-go-v2/service/dynamodb v1.26.6/go.mod h1:o7TD9sjdgrl8l/g2a2IkYjuhxjPy9DMP2sWo7piaRBQ= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.141.0 h1:cP43vFYAQyREOp972C+6d4+dzpxo3HolNvWfeBvr2Yg= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.141.0/go.mod 
h1:qjhtI9zjpUHRc6khtrIM9fb48+ii6+UikL3/b+MKYn0= -github.com/aws/aws-sdk-go-v2/service/ecr v1.24.5 h1:wLPDAUFT50NEXGXpywRU3AA74pg35RJjWol/68ruvQQ= -github.com/aws/aws-sdk-go-v2/service/ecr v1.24.5/go.mod h1:AOHmGMoPtSY9Zm2zBuwUJQBisIvYAZeA1n7b6f4e880= -github.com/aws/aws-sdk-go-v2/service/eks v1.36.0 h1:5jk86RO+sFu2BjMz2GcQ9Yf2IEi2Ntec2wPOt/lDc5c= -github.com/aws/aws-sdk-go-v2/service/eks v1.36.0/go.mod h1:L1uv3UgQlAkdM9v0gpec7nnfUiQkCnGMjBE7MJArfWQ= -github.com/aws/aws-sdk-go-v2/service/emr v1.35.5 h1:dZtEDpqYVg3i5oT8lSXxEsg6dInewHA3qNuyzHTvWck= -github.com/aws/aws-sdk-go-v2/service/emr v1.35.5/go.mod h1:Drh6y2qLaw/wnDKTIcdqM2m358MIRXsZ2Bj2tjhVLq0= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.142.0 h1:VrFC1uEZjX4ghkm/et8ATVGb1mT75Iv8aPKPjUE+F8A= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.142.0/go.mod h1:qjhtI9zjpUHRc6khtrIM9fb48+ii6+UikL3/b+MKYn0= +github.com/aws/aws-sdk-go-v2/service/ecr v1.24.6 h1:cT7h+GWP2k0hJSsPmppKgxl4C9R6gCC5/oF4oHnmpK4= +github.com/aws/aws-sdk-go-v2/service/ecr v1.24.6/go.mod h1:AOHmGMoPtSY9Zm2zBuwUJQBisIvYAZeA1n7b6f4e880= +github.com/aws/aws-sdk-go-v2/service/eks v1.37.0 h1:tCIkZ/ZdJMGZ1MOwdcioYhOUkkD4F58KFvQTgR3ZIlc= +github.com/aws/aws-sdk-go-v2/service/eks v1.37.0/go.mod h1:L1uv3UgQlAkdM9v0gpec7nnfUiQkCnGMjBE7MJArfWQ= +github.com/aws/aws-sdk-go-v2/service/emr v1.35.6 h1:eJ1K3IaNErKlR8MXg0c4aMjPpvp9qI39WCzx50u+XtQ= +github.com/aws/aws-sdk-go-v2/service/emr v1.35.6/go.mod h1:Drh6y2qLaw/wnDKTIcdqM2m358MIRXsZ2Bj2tjhVLq0= github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.6 h1:O2ppygCppB40GS7lDJUX4dGEgEdsKkX62oIAGgre/rY= github.com/aws/aws-sdk-go-v2/service/emrserverless v1.14.6/go.mod h1:G2r5cqojvwkdJJx6NDxszEfHC8f02TF15dE/3bg8P9A= github.com/aws/aws-sdk-go-v2/service/evidently v1.16.5 h1:qMMMld3RbqxSZ5KEokAu+w4MGV9YlSvisJbk4iMO4m0= @@ -121,8 +121,8 @@ github.com/aws/aws-sdk-go-v2/service/healthlake v1.20.5 h1:lm7KEWrkI54kso0o3qwOD github.com/aws/aws-sdk-go-v2/service/healthlake v1.20.5/go.mod 
h1:5IxzIDau0tsh8NRR6wcRp8u1Xn9QY9CcD9e34lpFqEQ= github.com/aws/aws-sdk-go-v2/service/iam v1.28.5 h1:Ts2eDDuMLrrmd0ARlg5zSoBQUvhdthgiNnPdiykTJs0= github.com/aws/aws-sdk-go-v2/service/iam v1.28.5/go.mod h1:kKI0gdVsf+Ev9knh/3lBJbchtX5LLNH25lAzx3KDj3Q= -github.com/aws/aws-sdk-go-v2/service/identitystore v1.21.6 h1:myI4L7UVKRDV1m97FRh0UUbTvsexqRanej7iXLLeLyc= -github.com/aws/aws-sdk-go-v2/service/identitystore v1.21.6/go.mod h1:vs4IYQdGHOLq6DsPfSuoADmRzr/AeWIk8m50XBnwN/o= +github.com/aws/aws-sdk-go-v2/service/identitystore v1.21.7 h1:OE7bZWyA8Eo61zc178BcvA54AkmBVkQ9rOkTi2jHRUw= +github.com/aws/aws-sdk-go-v2/service/identitystore v1.21.7/go.mod h1:vs4IYQdGHOLq6DsPfSuoADmRzr/AeWIk8m50XBnwN/o= github.com/aws/aws-sdk-go-v2/service/inspector2 v1.20.5 h1:PKwE3fh67K7Kig3LlbuipQOrNSraQuEpFl09VOpaNvc= github.com/aws/aws-sdk-go-v2/service/inspector2 v1.20.5/go.mod h1:hIgLcOPNanV8IteYZUx1YyLUJf//t0dI1F2+ecjVvlo= github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= @@ -135,8 +135,8 @@ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 h1:Nf2sHxjMJ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9/go.mod h1:idky4TER38YIjr2cADF1/ugFMKvZV7p//pVeV5LZbF0= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9 h1:iEAeF6YC3l4FzlJPP9H3Ko1TXpdjdqWffxXjp8SY6uk= github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.16.9/go.mod h1:kjsXoK23q9Z/tLBrckZLLyvjhZoS+AGrzqzUfEClvMM= -github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.10.5 h1:05ZNe2xprVADbOPDOVpBiwHAkits4ftok77kqYR58Ro= -github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.10.5/go.mod h1:EGOD8sGU5W6NO+TgfZeEPv3WdEB+NyCtJ5KET2kJWZI= +github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.10.6 h1:q4/pRkKLR+lv2N3HSlBcmO0v+LTYWxE32Tfr9ZZ8nOI= +github.com/aws/aws-sdk-go-v2/service/internetmonitor v1.10.6/go.mod h1:EGOD8sGU5W6NO+TgfZeEPv3WdEB+NyCtJ5KET2kJWZI= github.com/aws/aws-sdk-go-v2/service/ivschat v1.10.5 
h1:c6B43g0FFZ51zIUYgHSnPv0BDP4e6DTVUw6gi1oy+wg= github.com/aws/aws-sdk-go-v2/service/ivschat v1.10.5/go.mod h1:i+IKSFp4gZZj54Ffu0skGoV/3ilRQdLH9eIZij3pTEI= github.com/aws/aws-sdk-go-v2/service/kafka v1.28.5 h1:yCkyZDGahaCaAkdpVx8Te05t6eW2FarBLunVC8S23nU= @@ -145,26 +145,26 @@ github.com/aws/aws-sdk-go-v2/service/kendra v1.47.5 h1:cbNxSjOL87ojmhzOmAFEZ2C3V github.com/aws/aws-sdk-go-v2/service/kendra v1.47.5/go.mod h1:ZJKXlOfOrm/3tB501yY0yo9NOCWSAVsWRLYiS61GO8M= github.com/aws/aws-sdk-go-v2/service/keyspaces v1.7.5 h1:lvhWIY+MyMYoSPBLfZsgyLkpkqAyNMX/mAkGXbkzslk= github.com/aws/aws-sdk-go-v2/service/keyspaces v1.7.5/go.mod h1:YVdR8FtIDbHvsDkXuBa1ahRC+OhegEZY76h2k3ecLkg= -github.com/aws/aws-sdk-go-v2/service/lambda v1.49.5 h1:ZHVbzOnoj5nXxUug8iWzqg2Tmp6Jc4CE5tPfoE96qrs= -github.com/aws/aws-sdk-go-v2/service/lambda v1.49.5/go.mod h1:0V5z1X/8NA9eQ5cZSz5ZaHU8xA/hId2ZAlsHeO7Jrdk= +github.com/aws/aws-sdk-go-v2/service/lambda v1.49.6 h1:w8lI9zlVwRTL9f4KB9fRThddhRivv+EQQzv2nU8JDQo= +github.com/aws/aws-sdk-go-v2/service/lambda v1.49.6/go.mod h1:0V5z1X/8NA9eQ5cZSz5ZaHU8xA/hId2ZAlsHeO7Jrdk= github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.38.5 h1:3brhc6+qCRptJQB49YhOlLDFJM324GrXcpMK6knozdE= github.com/aws/aws-sdk-go-v2/service/lexmodelsv2 v1.38.5/go.mod h1:f+42yqPylOVSwssJ54Bk1TJDvLvGgy1SGTe/vwagfgo= github.com/aws/aws-sdk-go-v2/service/lightsail v1.32.5 h1:0KVnA62WGcVdeJKH+DTUkxNms2OsIky+AmB2iX93eAs= github.com/aws/aws-sdk-go-v2/service/lightsail v1.32.5/go.mod h1:wI7palPB84YaqCYglfNiyAlDcXTFbcJ9rDHMu15cFto= github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.25.5 h1:ppIqmTGLQo5emXMrMN/mQKNK5QdaYj4Wjmfpp4uMPz0= github.com/aws/aws-sdk-go-v2/service/lookoutmetrics v1.25.5/go.mod h1:ScibKBixJ/ywFZFjkmnKZmqjHwwnqqtLRBDV+XyzLoQ= -github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.24.5 h1:H0mJ0k7VH3Wctsxv3K42A7BxOvPDtJHavvoXlwc0+g0= -github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.24.5/go.mod h1:MFGDlrVZ4xLoX2BXiBa0fpEyMzEiFFWviK51g6V8Axs= 
-github.com/aws/aws-sdk-go-v2/service/medialive v1.43.3 h1:/Ub7sD+eD7K6FWQeuALyVApqyec5Ngk893X3VrVPN6c= -github.com/aws/aws-sdk-go-v2/service/medialive v1.43.3/go.mod h1:fH6Wz0q9JXupxmSgCFPwxymnpiX6PitFx2f/AqjuayM= +github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.24.6 h1:F7gqS0g3wUoGVyrR4l0Y1XuwsVY02xW6RHIoDJi7AuU= +github.com/aws/aws-sdk-go-v2/service/mediaconnect v1.24.6/go.mod h1:MFGDlrVZ4xLoX2BXiBa0fpEyMzEiFFWviK51g6V8Axs= +github.com/aws/aws-sdk-go-v2/service/medialive v1.44.0 h1:AZWCphV9RTW5Q50lavoCgdWu57Y4cxAmir90ZjN/cxo= +github.com/aws/aws-sdk-go-v2/service/medialive v1.44.0/go.mod h1:fH6Wz0q9JXupxmSgCFPwxymnpiX6PitFx2f/AqjuayM= github.com/aws/aws-sdk-go-v2/service/mediapackage v1.28.5 h1:z+b1lClMC3rSxlUQqRbpGh/uMmUHWC96uQ+AzzZpens= github.com/aws/aws-sdk-go-v2/service/mediapackage v1.28.5/go.mod h1:wGaElJ8kmGJ08nnirzZ/6iWKqBPErlHqtpkbx9go82Q= github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.7.5 h1:tkFfqFu8yx0AmRZAlwcF6hdDf7E7J+0P4tRAtfVB2bA= github.com/aws/aws-sdk-go-v2/service/mediapackagev2 v1.7.5/go.mod h1:pPsl4jKNPkhp2unuSQ3upeQ+9U8onSOPA2B++m5bD8o= github.com/aws/aws-sdk-go-v2/service/mq v1.20.6 h1:n86T5yw0kS6a5nbpkEpDzLPCBXXb35lx3iDkmQWlizA= github.com/aws/aws-sdk-go-v2/service/mq v1.20.6/go.mod h1:phfKOOpMQhlBv2KE8gF17P82zLcSedA9b7fMSGTLBdQ= -github.com/aws/aws-sdk-go-v2/service/oam v1.7.5 h1:Z5qjasrNlticGJVwZahvPiv7cnGeuEFGQ5AdCeTgf/0= -github.com/aws/aws-sdk-go-v2/service/oam v1.7.5/go.mod h1:qwJgNmAMUGFkLgAgTtkZZpGf9Qe1L0PwMD4oXMeS9Ic= +github.com/aws/aws-sdk-go-v2/service/oam v1.7.6 h1:YBKVU2TNkrfNzxh1BxURBgr0jq/7r1NBIXd41m6NhSg= +github.com/aws/aws-sdk-go-v2/service/oam v1.7.6/go.mod h1:qwJgNmAMUGFkLgAgTtkZZpGf9Qe1L0PwMD4oXMeS9Ic= github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.9.5 h1:V+zBQiUAATdwx3rLbc4Em+G0IeqPtY1281lHMrTvIK4= github.com/aws/aws-sdk-go-v2/service/opensearchserverless v1.9.5/go.mod h1:Hky91JAG7y6hJrIoZ6IyJlB99+AFOPUIfqVQcZ+fbhY= github.com/aws/aws-sdk-go-v2/service/osis v1.6.5 
h1:u0FL7wY1ni4WQkpfUiBslPmwKOltziQkGg5njTpPH6M= @@ -177,44 +177,44 @@ github.com/aws/aws-sdk-go-v2/service/pricing v1.24.5 h1:yJniPHxzGy0jtJNkXYTqI8ps github.com/aws/aws-sdk-go-v2/service/pricing v1.24.5/go.mod h1:Er8P68q9ayXFNzdTLKH9vGQ5Pq6fzqv0YYjslHxh8GE= github.com/aws/aws-sdk-go-v2/service/qldb v1.19.5 h1:dzxL7EqY37jp4AGBbMXyZT+koN8WMCEO0XCPuLp17pw= github.com/aws/aws-sdk-go-v2/service/qldb v1.19.5/go.mod h1:tN5rVxOznGnV6y5gXixoL83vMOAuPTFAnqafo813M8A= -github.com/aws/aws-sdk-go-v2/service/rbin v1.14.3 h1:5rT2pGAFgU2c/nkAZM2iDVVkLceQ04XFgkeWxKM04/4= -github.com/aws/aws-sdk-go-v2/service/rbin v1.14.3/go.mod h1:yX/8MJOGKdhrLvzOHppNzJvBQh5OKocDq4sP3CtXxgE= -github.com/aws/aws-sdk-go-v2/service/rds v1.64.6 h1:5aUu86tGOprdKtoIClCYPC6i4xalRDztBOlXgJnQFHk= -github.com/aws/aws-sdk-go-v2/service/rds v1.64.6/go.mod h1:MYzRMSdY70kcS8AFg0aHmk/xj6VAe0UfaCCoLrBWPow= +github.com/aws/aws-sdk-go-v2/service/rbin v1.14.4 h1:skHEuWaHjlLdGClBI89gkdM1+O3iJuGWYGIkKXziNwQ= +github.com/aws/aws-sdk-go-v2/service/rbin v1.14.4/go.mod h1:yX/8MJOGKdhrLvzOHppNzJvBQh5OKocDq4sP3CtXxgE= +github.com/aws/aws-sdk-go-v2/service/rds v1.66.0 h1:WUQ6kmnta31GhQvRJtHPVoO4hSNF8Yh2CQIFCZbhZ8g= +github.com/aws/aws-sdk-go-v2/service/rds v1.66.0/go.mod h1:MYzRMSdY70kcS8AFg0aHmk/xj6VAe0UfaCCoLrBWPow= github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.23.5 h1:jGGtFvVJ7RwXtAYOxLoUzWw5WjvsO1NYWuMawL64gZU= github.com/aws/aws-sdk-go-v2/service/redshiftdata v1.23.5/go.mod h1:nJQaSBV7r9td6WMmDDGKtlwE8D9BIDEDIpANfN+gMPE= github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.8.5 h1:7+BV1yNEchDbrgg/hdPVAi3jomqkoI5lqcQcTWTunGA= github.com/aws/aws-sdk-go-v2/service/resourceexplorer2 v1.8.5/go.mod h1:/zyGxTiN9z6xm3bEF4nJJLCqnbfcua+oLGrtr3xNiuE= -github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.19.5 h1:WDwFoNiIKvLkQJPSYs/KGefGknjn45xKQVTW96Lpcx0= -github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.19.5/go.mod h1:kHgibL7mHteV68QqxEWk/+GfSioAUZGBlz4e3Vs2r60= 
-github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.19.5 h1:vINTeQlqUbYkyKichayWejWqsMNya35Mj7XBcUZnwVI= -github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.19.5/go.mod h1:Nngchp1Q7LNBS8J10r4P0npfroNRaCVz6wWNfBz7j4E= +github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.19.6 h1:tSwNkjd5Uu0gHinlCaxHRFdud5q07ZGRRZtQ5kOuoH4= +github.com/aws/aws-sdk-go-v2/service/resourcegroups v1.19.6/go.mod h1:kHgibL7mHteV68QqxEWk/+GfSioAUZGBlz4e3Vs2r60= +github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.19.6 h1:wiM6xGxWTPI8Yck4efgQGS0lanuMILbng8oukqa4bNM= +github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.19.6/go.mod h1:Nngchp1Q7LNBS8J10r4P0npfroNRaCVz6wWNfBz7j4E= github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.6.6 h1:K//BccrDBRMSQCa4UkVVYCp2y4z77arQiT2TYl88wY0= github.com/aws/aws-sdk-go-v2/service/rolesanywhere v1.6.6/go.mod h1:e2+mEoq1rHtFpX8p6WcgiFgnDz0zG6y1BY/g8us9g2I= github.com/aws/aws-sdk-go-v2/service/route53domains v1.20.5 h1:WDr8iQXuDzL6ERqRvpdIy1ZdOjg6lXlEHSo8wOJiOyI= github.com/aws/aws-sdk-go-v2/service/route53domains v1.20.5/go.mod h1:7fnaaVoKfZaWJ8RuNYTYV3SkqD6BkFYlRuFDEkHajpc= -github.com/aws/aws-sdk-go-v2/service/s3 v1.47.6 h1:bkmlzokzTJyrFNA0J+EPlsF8x4/wp+9D45HTHO/ZUiY= -github.com/aws/aws-sdk-go-v2/service/s3 v1.47.6/go.mod h1:vADO6Jn+Rq4nDtfwNjhgR84qkZwiC6FqCaXdw/kYwjA= -github.com/aws/aws-sdk-go-v2/service/s3control v1.41.6 h1:pUtQfdf+KaKjsXFFlvVMVJpyVttwE5/tDTKgVX4oGcA= -github.com/aws/aws-sdk-go-v2/service/s3control v1.41.6/go.mod h1:sjVex3IIN70lry8Diga0vdi1DoHFwyFXY68ols4I8VI= +github.com/aws/aws-sdk-go-v2/service/s3 v1.47.7 h1:o0ASbVwUAIrfp/WcCac+6jioZt4Hd8k/1X8u7GJ/QeM= +github.com/aws/aws-sdk-go-v2/service/s3 v1.47.7/go.mod h1:vADO6Jn+Rq4nDtfwNjhgR84qkZwiC6FqCaXdw/kYwjA= +github.com/aws/aws-sdk-go-v2/service/s3control v1.41.7 h1:AEsJtsgEqqaBSww0UeUdzT4VBc4H1AdlXY3zLD8T0Bw= +github.com/aws/aws-sdk-go-v2/service/s3control v1.41.7/go.mod h1:sjVex3IIN70lry8Diga0vdi1DoHFwyFXY68ols4I8VI= 
github.com/aws/aws-sdk-go-v2/service/scheduler v1.6.5 h1:RpON5qyMUJKOGdQt0K7RUmV0zTUVSSGWtjvh/0CAqd8= github.com/aws/aws-sdk-go-v2/service/scheduler v1.6.5/go.mod h1:CXWnhzgqEhXAYwTVg4vBZQcP+yb4KxXOkogYih2tFm8= -github.com/aws/aws-sdk-go-v2/service/securityhub v1.44.0 h1:ft7wTBdLlWGoZpF22CHmDywWj//MTUjyJoevEXBRHZg= -github.com/aws/aws-sdk-go-v2/service/securityhub v1.44.0/go.mod h1:f//4sy7Yk66HjLWyQcFb6Vtkp/HEforV7G99czcsq54= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.44.1 h1:g0MpXSS40/MCpbYotttQtcT8FZNeTBSdOEs/KHYYaLE= +github.com/aws/aws-sdk-go-v2/service/securityhub v1.44.1/go.mod h1:f//4sy7Yk66HjLWyQcFb6Vtkp/HEforV7G99czcsq54= github.com/aws/aws-sdk-go-v2/service/securitylake v1.10.5 h1:gZ1yiSTBmJuQ1LCDIXlFQ+1XvC91QzAwRIhJ1L4ROp0= github.com/aws/aws-sdk-go-v2/service/securitylake v1.10.5/go.mod h1:eTBmkdUxVPP+Dy47TDGw9ZV6i7Y2oxYMFrxSkEPNO3w= -github.com/aws/aws-sdk-go-v2/service/servicequotas v1.19.5 h1:IN/aY5wGoRMfZJuuZrp07bvdJt9M7Nh7+alOjae7mM4= -github.com/aws/aws-sdk-go-v2/service/servicequotas v1.19.5/go.mod h1:mSa1Q/Q1/nAVj7nShrepbcRz1vXQFWv5sb9CFL1/4OM= +github.com/aws/aws-sdk-go-v2/service/servicequotas v1.19.6 h1:iuMXuK4Rl9Rx5xmlhCCGcwSLuepvAF5l80TtvixZVl8= +github.com/aws/aws-sdk-go-v2/service/servicequotas v1.19.6/go.mod h1:mSa1Q/Q1/nAVj7nShrepbcRz1vXQFWv5sb9CFL1/4OM= github.com/aws/aws-sdk-go-v2/service/sesv2 v1.24.5 h1:40JojNesfzskcmQvfj6UUxH1nzN4UtXWfjlSFfFqsns= github.com/aws/aws-sdk-go-v2/service/sesv2 v1.24.5/go.mod h1:ecfOtw2ELIDKjgOxV7Zbg++MwZN0kFDqK8tLxF7uSys= github.com/aws/aws-sdk-go-v2/service/signer v1.19.6 h1:Y4Rikb/krOWTfdy6dzQ2/WbBGRTTPcM6qAB+Mt0QKVo= github.com/aws/aws-sdk-go-v2/service/signer v1.19.6/go.mod h1:Y3u+41K5TVVkKhSlzZ+mtUI9z1k13TxpLtbJNHhV3fA= -github.com/aws/aws-sdk-go-v2/service/sns v1.26.5 h1:umyC9zH/A1w8AXrrG7iMxT4Rfgj80FjfvLannWt5vuE= -github.com/aws/aws-sdk-go-v2/service/sns v1.26.5/go.mod h1:IrcbquqMupzndZ20BXxDxjM7XenTRhbwBOetk4+Z5oc= -github.com/aws/aws-sdk-go-v2/service/sqs v1.29.5 
h1:cJb4I498c1mrOVrRqYTcnLD65AFqUuseHfzHdNZHL9U= -github.com/aws/aws-sdk-go-v2/service/sqs v1.29.5/go.mod h1:mCUv04gd/7g+/HNzDB4X6dzJuygji0ckvB3Lg/TdG5Y= -github.com/aws/aws-sdk-go-v2/service/ssm v1.44.5 h1:5SI5O2tMp/7E/FqhYnaKdxbWjlCi2yujjNI/UO725iU= -github.com/aws/aws-sdk-go-v2/service/ssm v1.44.5/go.mod h1:uXndCJoDO9gpuK24rNWVCnrGNUydKFEAYAZ7UU9S0rQ= +github.com/aws/aws-sdk-go-v2/service/sns v1.26.6 h1:w2YwF8889ardGU3Y0qZbJ4Zzh+Q/QqKZ4kwkK7JFvnI= +github.com/aws/aws-sdk-go-v2/service/sns v1.26.6/go.mod h1:IrcbquqMupzndZ20BXxDxjM7XenTRhbwBOetk4+Z5oc= +github.com/aws/aws-sdk-go-v2/service/sqs v1.29.6 h1:UdbDTllc7cmusTTMy1dcTrYKRl4utDEsmKh9ZjvhJCc= +github.com/aws/aws-sdk-go-v2/service/sqs v1.29.6/go.mod h1:mCUv04gd/7g+/HNzDB4X6dzJuygji0ckvB3Lg/TdG5Y= +github.com/aws/aws-sdk-go-v2/service/ssm v1.44.6 h1:EZw+TRx/4qlfp6VJ0P1sx04Txd9yGNK+NiO1upaXmh4= +github.com/aws/aws-sdk-go-v2/service/ssm v1.44.6/go.mod h1:uXndCJoDO9gpuK24rNWVCnrGNUydKFEAYAZ7UU9S0rQ= github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.20.5 h1:qIzGNd+8lT3hXdq/TJ7sxGWq9xI1uKfeorwP4tYuJR0= github.com/aws/aws-sdk-go-v2/service/ssmcontacts v1.20.5/go.mod h1:Jo4uHzInZp+heTq54nz0c71D1a2som4mlvK/jDtZSKw= github.com/aws/aws-sdk-go-v2/service/ssmincidents v1.27.5 h1:WOVvRHb2gJaaQNXkjxT5DSHazMwlycAqi4SMHnX1kyI= @@ -225,10 +225,10 @@ github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.23.5 h1:WaH4tywTDnktvZFmNEMlgxJ github.com/aws/aws-sdk-go-v2/service/ssoadmin v1.23.5/go.mod h1:8o8oOg3mQJcmwWdjfVSILMWrSJyXiohzTFuqYMrmy6Q= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5 h1:2k9KmFawS63euAkY4/ixVNsYYwrwnd5fIvgEKkfZFNM= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5/go.mod h1:W+nd4wWDVkSUIox9bacmkBP5NMFQeTJ/xqNabpzSR38= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.5 h1:5UYvv8JUvllZsRnfrcMQ+hJ9jNICmcgKPAO1CER25Wg= -github.com/aws/aws-sdk-go-v2/service/sts v1.26.5/go.mod h1:XX5gh4CB7wAs4KhcF46G6C8a2i7eupU19dcAAE+EydU= -github.com/aws/aws-sdk-go-v2/service/swf v1.20.5 
h1:9CU3kwRGpUReKubOsmxgG9LfaVpZ1PW/ON+5ZTKu5Gs= -github.com/aws/aws-sdk-go-v2/service/swf v1.20.5/go.mod h1:i01QTdCHqrntRqtNeYmxUSDCcmXERzFCePIcHDjASHE= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.6 h1:HJeiuZ2fldpd0WqngyMR6KW7ofkXNLyOaHwEIGm39Cs= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.6/go.mod h1:XX5gh4CB7wAs4KhcF46G6C8a2i7eupU19dcAAE+EydU= +github.com/aws/aws-sdk-go-v2/service/swf v1.20.6 h1:zzZXrBWFgS9oiaxKegvtcG2yaHFHBem+vXJRnvHOG5o= +github.com/aws/aws-sdk-go-v2/service/swf v1.20.6/go.mod h1:i01QTdCHqrntRqtNeYmxUSDCcmXERzFCePIcHDjASHE= github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.23.6 h1:+7xZRneTlcraXL4+oN2kUlQX9ULh4aIxmcpUoR/faGA= github.com/aws/aws-sdk-go-v2/service/timestreamwrite v1.23.6/go.mod h1:igyhq0ZP1mXLKnSAGrGneVjs2aELNYQkskoF/WxR3+w= github.com/aws/aws-sdk-go-v2/service/transcribe v1.34.5 h1:/UVYwh9hQDvXsCCJcafCKHgykfOa/EpsOfJPgiSYSSU= @@ -239,8 +239,8 @@ github.com/aws/aws-sdk-go-v2/service/vpclattice v1.5.5 h1:8AV6s1CjF1Kg4wI4Cru0vF github.com/aws/aws-sdk-go-v2/service/vpclattice v1.5.5/go.mod h1:Avxrq4VqhpuKgGdZifhrJP5a9DsDt7cESkdhaZHnYp0= github.com/aws/aws-sdk-go-v2/service/workspaces v1.35.6 h1:RrpjQ5xJN/AW0PCO7EGhhVsKq7BeNqkx5+h6p3QOeTU= github.com/aws/aws-sdk-go-v2/service/workspaces v1.35.6/go.mod h1:vkYsJdF9sZl/o1eoK8tSSjzAT+R87QjswOGSTZfyO0Y= -github.com/aws/aws-sdk-go-v2/service/xray v1.23.5 h1:uCqKSGx5Esj9ZW6/zZ7tslkM65aH+qjHO3yboiRqcLo= -github.com/aws/aws-sdk-go-v2/service/xray v1.23.5/go.mod h1:VmWKTNu6V1qRG+skNKkYt7VOFohYdtOp7B2OSvpBZac= +github.com/aws/aws-sdk-go-v2/service/xray v1.23.6 h1:K8QBXlR+ogytjfi/D8nGe1lDNPy5qup8aQTQPppPQuc= +github.com/aws/aws-sdk-go-v2/service/xray v1.23.6/go.mod h1:VmWKTNu6V1qRG+skNKkYt7VOFohYdtOp7B2OSvpBZac= github.com/aws/smithy-go v1.19.0 h1:KWFKQV80DpP3vJrrA9sVAHQ5gc2z8i4EzrLhLlWXcBM= github.com/aws/smithy-go v1.19.0/go.mod h1:NukqUGpCZIILqqiV0NIjeFh24kd/FAa4beRb6nbIUPE= github.com/beevik/etree v1.2.0 h1:l7WETslUG/T+xOPs47dtd6jov2Ii/8/OjCldk5fYfQw= From 
92bdc004b6d760ce857a499a3a4badde8b466413 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 14:01:07 -0500 Subject: [PATCH 415/438] Remove 'lintignore:AWSR001'. --- internal/service/s3/bucket_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index 6e7465a5e67..98e999ad9d0 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -2441,15 +2441,15 @@ func TestWebsiteEndpoint(t *testing.T) { }{ { LocationConstraint: "", - Expected: fmt.Sprintf("bucket-name.s3-website-%s.amazonaws.com", names.USEast1RegionID), //lintignore:AWSR001 + Expected: fmt.Sprintf("bucket-name.s3-website-%s.%s", names.USEast1RegionID, acctest.PartitionDNSSuffix()), }, { LocationConstraint: names.USEast2RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.amazonaws.com", names.USEast2RegionID), //lintignore:AWSR001 + Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", names.USEast2RegionID, acctest.PartitionDNSSuffix()), }, { LocationConstraint: names.USGovEast1RegionID, - Expected: fmt.Sprintf("bucket-name.s3-website.%s.amazonaws.com", names.USGovEast1RegionID), //lintignore:AWSR001 + Expected: fmt.Sprintf("bucket-name.s3-website.%s.%s", names.USGovEast1RegionID, acctest.PartitionDNSSuffix()), }, { LocationConstraint: names.USISOEast1RegionID, From 32398c3b9cf0ad44c0550408f5e3519e6ceba74c Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 14:09:44 -0500 Subject: [PATCH 416/438] Additional CHANGELOG entry. 
--- .changelog/35050.txt | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.changelog/35050.txt b/.changelog/35050.txt index 5f6376645ec..59056c4694c 100644 --- a/.changelog/35050.txt +++ b/.changelog/35050.txt @@ -1,3 +1,7 @@ ```release-note:enhancement resource/aws_lambda_function: Add `logging_config` configuration block in support of [advanced logging controls](https://docs.aws.amazon.com/lambda/latest/dg/monitoring-cloudwatchlogs.html#monitoring-cloudwatchlogs-advanced) ``` + +```release-note:enhancement +data-source/aws_lambda_function: Add `logging_config` attribute +``` \ No newline at end of file From e3f97f293d766012be92e05c0b0879424634602b Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 14:11:29 -0500 Subject: [PATCH 417/438] Cosmetics. --- internal/service/lambda/function.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/internal/service/lambda/function.go b/internal/service/lambda/function.go index e5965c5edc8..a1af549b838 100644 --- a/internal/service/lambda/function.go +++ b/internal/service/lambda/function.go @@ -1458,10 +1458,10 @@ func flattenLoggingConfig(apiObject *types.LoggingConfig) []map[string]interface return nil } m := map[string]interface{}{ - "application_log_level": string(apiObject.ApplicationLogLevel), - "log_format": string(apiObject.LogFormat), - "log_group": *apiObject.LogGroup, - "system_log_level": string(apiObject.SystemLogLevel), + "application_log_level": apiObject.ApplicationLogLevel, + "log_format": apiObject.LogFormat, + "log_group": aws.ToString(apiObject.LogGroup), + "system_log_level": apiObject.SystemLogLevel, } return []map[string]interface{}{m} From 3e7ef841aeb4910131806c6d67f0f1b761549049 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 14:17:26 -0500 Subject: [PATCH 418/438] r/aws_s3_bucket: Fix typos. 
--- internal/service/s3/bucket.go | 39 +++++++++---------- .../s3/bucket_replication_configuration.go | 8 ++-- 2 files changed, 23 insertions(+), 24 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 257c85bd757..eb54c0e3855 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -2101,20 +2101,19 @@ func expandBucketLifecycleRules(ctx context.Context, l []interface{}) []types.Li for _, tfMapRaw := range l { tfMap, ok := tfMapRaw.(map[string]interface{}) - if !ok { continue } result := types.LifecycleRule{} - if v, ok := tfMap["abort_incomplete_multipart_upload"].(int); ok && v > 0 { + if v, ok := tfMap["abort_incomplete_multipart_upload_days"].(int); ok && v > 0 { result.AbortIncompleteMultipartUpload = &types.AbortIncompleteMultipartUpload{ DaysAfterInitiation: aws.Int32(int32(v)), } } - if v, ok := tfMap["expiration"].([]interface{}); ok && len(v) > 0 { + if v, ok := tfMap["expiration"].([]interface{}); ok && len(v) > 0 && v[0] != nil { result.Expiration = expandBucketLifecycleExpiration(v) } @@ -2444,55 +2443,55 @@ func expandBucketReplicationRules(ctx context.Context, l []interface{}) []types. 
var rules []types.ReplicationRule for _, tfMapRaw := range l { - tfMap, ok := tfMapRaw.(map[string]interface{}) + tfRuleMap, ok := tfMapRaw.(map[string]interface{}) if !ok { continue } rule := types.ReplicationRule{} - if v, ok := tfMap["status"].(string); ok && v != "" { + if v, ok := tfRuleMap["status"].(string); ok && v != "" { rule.Status = types.ReplicationRuleStatus(v) } else { continue } - if v, ok := tfMap["destination"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + if v, ok := tfRuleMap["destination"].([]interface{}); ok && len(v) > 0 && v[0] != nil { rule.Destination = expandBucketDestination(v) } else { rule.Destination = &types.Destination{} } - if v, ok := tfMap["id"].(string); ok && v != "" { + if v, ok := tfRuleMap["id"].(string); ok && v != "" { rule.ID = aws.String(v) } - if v, ok := tfMap["source_selection_criteria"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + if v, ok := tfRuleMap["source_selection_criteria"].([]interface{}); ok && len(v) > 0 && v[0] != nil { rule.SourceSelectionCriteria = expandBucketSourceSelectionCriteria(v) } - if v, ok := tfMap["filter"].([]interface{}); ok && len(v) > 0 && v[0] != nil { + if v, ok := tfRuleMap["filter"].([]interface{}); ok && len(v) > 0 && v[0] != nil { // XML schema V2. 
- tfMap := v[0].(map[string]interface{}) + tfFilterMap := v[0].(map[string]interface{}) var filter types.ReplicationRuleFilter - if tags := Tags(tftags.New(ctx, tfMap["tags"]).IgnoreAWS()); len(tags) > 0 { + if tags := Tags(tftags.New(ctx, tfFilterMap["tags"]).IgnoreAWS()); len(tags) > 0 { filter = &types.ReplicationRuleFilterMemberAnd{ Value: types.ReplicationRuleAndOperator{ - Prefix: aws.String(tfMap["prefix"].(string)), + Prefix: aws.String(tfFilterMap["prefix"].(string)), Tags: tags, }, } } else { filter = &types.ReplicationRuleFilterMemberPrefix{ - Value: tfMap["prefix"].(string), + Value: tfFilterMap["prefix"].(string), } } rule.Filter = filter - rule.Priority = aws.Int32(int32(tfMap["priority"].(int))) + rule.Priority = aws.Int32(int32(tfRuleMap["priority"].(int))) - if v, ok := tfMap["delete_marker_replication_status"].(string); ok && v != "" { + if v, ok := tfRuleMap["delete_marker_replication_status"].(string); ok && v != "" { rule.DeleteMarkerReplication = &types.DeleteMarkerReplication{ Status: types.DeleteMarkerReplicationStatus(v), } @@ -2503,7 +2502,7 @@ func expandBucketReplicationRules(ctx context.Context, l []interface{}) []types. } } else { // XML schema V1. 
- rule.Prefix = aws.String(tfMap["prefix"].(string)) + rule.Prefix = aws.String(tfRuleMap["prefix"].(string)) } rules = append(rules, rule) @@ -2651,14 +2650,14 @@ func flattenBucketReplicationRules(ctx context.Context, rules []types.Replicatio m["id"] = aws.ToString(rule.ID) } - if rule.Priority != nil { - m["priority"] = aws.ToInt32(rule.Priority) - } - if rule.Prefix != nil { m["prefix"] = aws.ToString(rule.Prefix) } + if rule.Priority != nil { + m["priority"] = aws.ToInt32(rule.Priority) + } + if rule.SourceSelectionCriteria != nil { m["source_selection_criteria"] = flattenBucketSourceSelectionCriteria(rule.SourceSelectionCriteria) } diff --git a/internal/service/s3/bucket_replication_configuration.go b/internal/service/s3/bucket_replication_configuration.go index 46e25fe6924..9c790733980 100644 --- a/internal/service/s3/bucket_replication_configuration.go +++ b/internal/service/s3/bucket_replication_configuration.go @@ -895,14 +895,14 @@ func flattenReplicationRules(ctx context.Context, rules []types.ReplicationRule) m["id"] = aws.ToString(rule.ID) } - if rule.Priority != nil { - m["priority"] = aws.ToInt32(rule.Priority) - } - if rule.Prefix != nil { m["prefix"] = aws.ToString(rule.Prefix) } + if rule.Priority != nil { + m["priority"] = aws.ToInt32(rule.Priority) + } + if rule.SourceSelectionCriteria != nil { m["source_selection_criteria"] = flattenSourceSelectionCriteria(rule.SourceSelectionCriteria) } From 5e6fe74091112608d6d51a3e026d59750d8d7f84 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 14:20:34 -0500 Subject: [PATCH 419/438] 's3BucketPropagationTimeout' -> 'bucketPropagationTimeout'. 
--- internal/service/s3/bucket.go | 2 +- internal/service/s3/bucket_accelerate_configuration.go | 4 ++-- internal/service/s3/bucket_acl.go | 4 ++-- internal/service/s3/bucket_analytics_configuration.go | 6 +++--- internal/service/s3/bucket_cors_configuration.go | 6 +++--- .../service/s3/bucket_intelligent_tiering_configuration.go | 6 +++--- internal/service/s3/bucket_inventory.go | 6 +++--- internal/service/s3/bucket_lifecycle_configuration.go | 6 +++--- internal/service/s3/bucket_logging.go | 4 ++-- internal/service/s3/bucket_metric.go | 6 +++--- internal/service/s3/bucket_notification.go | 4 ++-- internal/service/s3/bucket_object_lock_configuration.go | 4 ++-- internal/service/s3/bucket_ownership_controls.go | 4 ++-- internal/service/s3/bucket_policy.go | 6 +++--- internal/service/s3/bucket_public_access_block.go | 6 +++--- internal/service/s3/bucket_replication_configuration.go | 6 +++--- internal/service/s3/bucket_request_payment_configuration.go | 4 ++-- .../s3/bucket_server_side_encryption_configuration.go | 6 +++--- internal/service/s3/bucket_test.go | 2 +- internal/service/s3/bucket_versioning.go | 4 ++-- internal/service/s3/bucket_website_configuration.go | 6 +++--- internal/service/s3/exports_test.go | 2 +- 22 files changed, 52 insertions(+), 52 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index eb54c0e3855..2c46fc7ecef 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -46,7 +46,7 @@ import ( const ( // General timeout for S3 bucket changes to propagate. // See https://docs.aws.amazon.com/AmazonS3/latest/userguide/Welcome.html#ConsistencyModel. 
- s3BucketPropagationTimeout = 2 * time.Minute // nosemgrep:ci.s3-in-const-name, ci.s3-in-var-name + bucketPropagationTimeout = 2 * time.Minute // nosemgrep:ci.s3-in-const-name, ci.s3-in-var-name ) // @SDKResource("aws_s3_bucket", name="Bucket") diff --git a/internal/service/s3/bucket_accelerate_configuration.go b/internal/service/s3/bucket_accelerate_configuration.go index b1499261ac6..4e5ceea7de5 100644 --- a/internal/service/s3/bucket_accelerate_configuration.go +++ b/internal/service/s3/bucket_accelerate_configuration.go @@ -70,7 +70,7 @@ func resourceBucketAccelerateConfigurationCreate(ctx context.Context, d *schema. input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (interface{}, error) { return conn.PutBucketAccelerateConfiguration(ctx, input) }, errCodeNoSuchBucket) @@ -84,7 +84,7 @@ func resourceBucketAccelerateConfigurationCreate(ctx context.Context, d *schema. 
d.SetId(CreateResourceID(bucket, expectedBucketOwner)) - _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findBucketAccelerateConfiguration(ctx, conn, bucket, expectedBucketOwner) }) diff --git a/internal/service/s3/bucket_acl.go b/internal/service/s3/bucket_acl.go index 689055b737a..a103ca7b394 100644 --- a/internal/service/s3/bucket_acl.go +++ b/internal/service/s3/bucket_acl.go @@ -155,7 +155,7 @@ func resourceBucketACLCreate(ctx context.Context, d *schema.ResourceData, meta i input.AccessControlPolicy = expandAccessControlPolicy(v.([]interface{})) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (interface{}, error) { return conn.PutBucketAcl(ctx, input) }, errCodeNoSuchBucket) @@ -169,7 +169,7 @@ func resourceBucketACLCreate(ctx context.Context, d *schema.ResourceData, meta i d.SetId(BucketACLCreateResourceID(bucket, expectedBucketOwner, acl)) - _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findBucketACL(ctx, conn, bucket, expectedBucketOwner) }) diff --git a/internal/service/s3/bucket_analytics_configuration.go b/internal/service/s3/bucket_analytics_configuration.go index 846db0154fe..816ecf50d93 100644 --- a/internal/service/s3/bucket_analytics_configuration.go +++ b/internal/service/s3/bucket_analytics_configuration.go @@ -154,7 +154,7 @@ func resourceBucketAnalyticsConfigurationPut(ctx context.Context, d *schema.Reso AnalyticsConfiguration: analyticsConfiguration, } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err := 
tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (interface{}, error) { return conn.PutBucketAnalyticsConfiguration(ctx, input) }, errCodeNoSuchBucket) @@ -169,7 +169,7 @@ func resourceBucketAnalyticsConfigurationPut(ctx context.Context, d *schema.Reso if d.IsNewResource() { d.SetId(fmt.Sprintf("%s:%s", bucket, name)) - _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findAnalyticsConfiguration(ctx, conn, bucket, name) }) @@ -237,7 +237,7 @@ func resourceBucketAnalyticsConfigurationDelete(ctx context.Context, d *schema.R return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket Analytics Configuration (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findAnalyticsConfiguration(ctx, conn, bucket, name) }) diff --git a/internal/service/s3/bucket_cors_configuration.go b/internal/service/s3/bucket_cors_configuration.go index 00a11ce8905..29f248a3cef 100644 --- a/internal/service/s3/bucket_cors_configuration.go +++ b/internal/service/s3/bucket_cors_configuration.go @@ -103,7 +103,7 @@ func resourceBucketCorsConfigurationCreate(ctx context.Context, d *schema.Resour input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (interface{}, error) { return conn.PutBucketCors(ctx, input) }, errCodeNoSuchBucket) @@ -117,7 +117,7 @@ func resourceBucketCorsConfigurationCreate(ctx context.Context, d *schema.Resour d.SetId(CreateResourceID(bucket, expectedBucketOwner)) - _, err = tfresource.RetryWhenNotFound(ctx, 
s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findCORSRules(ctx, conn, bucket, expectedBucketOwner) }) @@ -209,7 +209,7 @@ func resourceBucketCorsConfigurationDelete(ctx context.Context, d *schema.Resour return diag.Errorf("deleting S3 Bucket CORS Configuration (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findCORSRules(ctx, conn, bucket, expectedBucketOwner) }) diff --git a/internal/service/s3/bucket_intelligent_tiering_configuration.go b/internal/service/s3/bucket_intelligent_tiering_configuration.go index ea9275eefba..feeb2c86ac1 100644 --- a/internal/service/s3/bucket_intelligent_tiering_configuration.go +++ b/internal/service/s3/bucket_intelligent_tiering_configuration.go @@ -119,7 +119,7 @@ func resourceBucketIntelligentTieringConfigurationPut(ctx context.Context, d *sc IntelligentTieringConfiguration: intelligentTieringConfiguration, } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (interface{}, error) { return conn.PutBucketIntelligentTieringConfiguration(ctx, input) }, errCodeNoSuchBucket) @@ -134,7 +134,7 @@ func resourceBucketIntelligentTieringConfigurationPut(ctx context.Context, d *sc if d.IsNewResource() { d.SetId(BucketIntelligentTieringConfigurationCreateResourceID(bucket, name)) - _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findIntelligentTieringConfiguration(ctx, conn, bucket, name) }) @@ -207,7 +207,7 @@ func 
resourceBucketIntelligentTieringConfigurationDelete(ctx context.Context, d return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket Intelligent-Tiering Configuration (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findIntelligentTieringConfiguration(ctx, conn, bucket, name) }) diff --git a/internal/service/s3/bucket_inventory.go b/internal/service/s3/bucket_inventory.go index 38d89603718..6f496837560 100644 --- a/internal/service/s3/bucket_inventory.go +++ b/internal/service/s3/bucket_inventory.go @@ -215,7 +215,7 @@ func resourceBucketInventoryPut(ctx context.Context, d *schema.ResourceData, met InventoryConfiguration: inventoryConfiguration, } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (interface{}, error) { return conn.PutBucketInventoryConfiguration(ctx, input) }, errCodeNoSuchBucket) @@ -230,7 +230,7 @@ func resourceBucketInventoryPut(ctx context.Context, d *schema.ResourceData, met if d.IsNewResource() { d.SetId(fmt.Sprintf("%s:%s", bucket, name)) - _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findInventoryConfiguration(ctx, conn, bucket, name) }) @@ -311,7 +311,7 @@ func resourceBucketInventoryDelete(ctx context.Context, d *schema.ResourceData, return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket Inventory (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return 
findInventoryConfiguration(ctx, conn, bucket, name) }) diff --git a/internal/service/s3/bucket_lifecycle_configuration.go b/internal/service/s3/bucket_lifecycle_configuration.go index 957f25f1298..fb5743d3640 100644 --- a/internal/service/s3/bucket_lifecycle_configuration.go +++ b/internal/service/s3/bucket_lifecycle_configuration.go @@ -270,7 +270,7 @@ func resourceBucketLifecycleConfigurationCreate(ctx context.Context, d *schema.R input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (interface{}, error) { return conn.PutBucketLifecycleConfiguration(ctx, input) }, errCodeNoSuchBucket) @@ -372,7 +372,7 @@ func resourceBucketLifecycleConfigurationUpdate(ctx context.Context, d *schema.R input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (interface{}, error) { return conn.PutBucketLifecycleConfiguration(ctx, input) }, errCodeNoSuchLifecycleConfiguration) @@ -414,7 +414,7 @@ func resourceBucketLifecycleConfigurationDelete(ctx context.Context, d *schema.R return diag.Errorf("deleting S3 Bucket Lifecycle Configuration (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findLifecycleRules(ctx, conn, bucket, expectedBucketOwner) }) diff --git a/internal/service/s3/bucket_logging.go b/internal/service/s3/bucket_logging.go index 52ebd5e356e..770687319e0 100644 --- a/internal/service/s3/bucket_logging.go +++ b/internal/service/s3/bucket_logging.go @@ -162,7 +162,7 @@ func 
resourceBucketLoggingCreate(ctx context.Context, d *schema.ResourceData, me input.BucketLoggingStatus.LoggingEnabled.TargetObjectKeyFormat = expandTargetObjectKeyFormat(v.([]interface{})[0].(map[string]interface{})) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (interface{}, error) { return conn.PutBucketLogging(ctx, input) }, errCodeNoSuchBucket) @@ -176,7 +176,7 @@ func resourceBucketLoggingCreate(ctx context.Context, d *schema.ResourceData, me d.SetId(CreateResourceID(bucket, expectedBucketOwner)) - _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findLoggingEnabled(ctx, conn, bucket, expectedBucketOwner) }) diff --git a/internal/service/s3/bucket_metric.go b/internal/service/s3/bucket_metric.go index 69f37d94ce4..9977b160674 100644 --- a/internal/service/s3/bucket_metric.go +++ b/internal/service/s3/bucket_metric.go @@ -93,7 +93,7 @@ func resourceBucketMetricPut(ctx context.Context, d *schema.ResourceData, meta i MetricsConfiguration: metricsConfiguration, } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (interface{}, error) { return conn.PutBucketMetricsConfiguration(ctx, input) }, errCodeNoSuchBucket) @@ -108,7 +108,7 @@ func resourceBucketMetricPut(ctx context.Context, d *schema.ResourceData, meta i if d.IsNewResource() { d.SetId(fmt.Sprintf("%s:%s", bucket, name)) - _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return 
findMetricsConfiguration(ctx, conn, bucket, name) }) @@ -175,7 +175,7 @@ func resourceBucketMetricDelete(ctx context.Context, d *schema.ResourceData, met return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket Metric (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findMetricsConfiguration(ctx, conn, bucket, name) }) diff --git a/internal/service/s3/bucket_notification.go b/internal/service/s3/bucket_notification.go index 1dc624630b4..3296e29003b 100644 --- a/internal/service/s3/bucket_notification.go +++ b/internal/service/s3/bucket_notification.go @@ -300,7 +300,7 @@ func resourceBucketNotificationPut(ctx context.Context, d *schema.ResourceData, NotificationConfiguration: notificationConfiguration, } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (interface{}, error) { return conn.PutBucketNotificationConfiguration(ctx, input) }, errCodeNoSuchBucket) @@ -315,7 +315,7 @@ func resourceBucketNotificationPut(ctx context.Context, d *schema.ResourceData, if d.IsNewResource() { d.SetId(bucket) - _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findBucketNotificationConfiguration(ctx, conn, d.Id(), "") }) diff --git a/internal/service/s3/bucket_object_lock_configuration.go b/internal/service/s3/bucket_object_lock_configuration.go index 9ede730b67d..70ce4ac0baa 100644 --- a/internal/service/s3/bucket_object_lock_configuration.go +++ b/internal/service/s3/bucket_object_lock_configuration.go @@ -122,7 +122,7 @@ func resourceBucketObjectLockConfigurationCreate(ctx context.Context, d 
*schema. input.Token = aws.String(v.(string)) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (interface{}, error) { return conn.PutObjectLockConfiguration(ctx, input) }, errCodeNoSuchBucket) @@ -136,7 +136,7 @@ func resourceBucketObjectLockConfigurationCreate(ctx context.Context, d *schema. d.SetId(CreateResourceID(bucket, expectedBucketOwner)) - _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findObjectLockConfiguration(ctx, conn, bucket, expectedBucketOwner) }) diff --git a/internal/service/s3/bucket_ownership_controls.go b/internal/service/s3/bucket_ownership_controls.go index 5d2c4b32990..2363c669069 100644 --- a/internal/service/s3/bucket_ownership_controls.go +++ b/internal/service/s3/bucket_ownership_controls.go @@ -84,7 +84,7 @@ func resourceBucketOwnershipControlsCreate(ctx context.Context, d *schema.Resour d.SetId(bucket) - _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findOwnershipControls(ctx, conn, d.Id()) }) @@ -158,7 +158,7 @@ func resourceBucketOwnershipControlsDelete(ctx context.Context, d *schema.Resour return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket Ownership Controls (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findOwnershipControls(ctx, conn, d.Id()) }) diff --git a/internal/service/s3/bucket_policy.go b/internal/service/s3/bucket_policy.go index ab0d4516985..1e0e9f3bf5d 100644 
--- a/internal/service/s3/bucket_policy.go +++ b/internal/service/s3/bucket_policy.go @@ -72,7 +72,7 @@ func resourceBucketPolicyPut(ctx context.Context, d *schema.ResourceData, meta i Policy: aws.String(policy), } - _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (interface{}, error) { return conn.PutBucketPolicy(ctx, input) }, errCodeMalformedPolicy, errCodeNoSuchBucket) @@ -83,7 +83,7 @@ func resourceBucketPolicyPut(ctx context.Context, d *schema.ResourceData, meta i if d.IsNewResource() { d.SetId(bucket) - _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findBucketPolicy(ctx, conn, d.Id()) }) @@ -145,7 +145,7 @@ func resourceBucketPolicyDelete(ctx context.Context, d *schema.ResourceData, met return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket Policy (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findBucketPolicy(ctx, conn, d.Id()) }) diff --git a/internal/service/s3/bucket_public_access_block.go b/internal/service/s3/bucket_public_access_block.go index 2339cec6715..25bc7749d33 100644 --- a/internal/service/s3/bucket_public_access_block.go +++ b/internal/service/s3/bucket_public_access_block.go @@ -76,7 +76,7 @@ func resourceBucketPublicAccessBlockCreate(ctx context.Context, d *schema.Resour }, } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (interface{}, error) { return conn.PutPublicAccessBlock(ctx, input) }, 
errCodeNoSuchBucket) @@ -90,7 +90,7 @@ func resourceBucketPublicAccessBlockCreate(ctx context.Context, d *schema.Resour d.SetId(bucket) - _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findPublicAccessBlockConfiguration(ctx, conn, d.Id()) }) @@ -176,7 +176,7 @@ func resourceBucketPublicAccessBlockDelete(ctx context.Context, d *schema.Resour return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket Public Access Block (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findPublicAccessBlockConfiguration(ctx, conn, d.Id()) }) diff --git a/internal/service/s3/bucket_replication_configuration.go b/internal/service/s3/bucket_replication_configuration.go index 9c790733980..0fa7530e9f7 100644 --- a/internal/service/s3/bucket_replication_configuration.go +++ b/internal/service/s3/bucket_replication_configuration.go @@ -328,7 +328,7 @@ func resourceBucketReplicationConfigurationCreate(ctx context.Context, d *schema input.Token = aws.String(v.(string)) } - err := retry.RetryContext(ctx, s3BucketPropagationTimeout, func() *retry.RetryError { + err := retry.RetryContext(ctx, bucketPropagationTimeout, func() *retry.RetryError { _, err := conn.PutBucketReplication(ctx, input) if tfawserr.ErrCodeEquals(err, errCodeNoSuchBucket) || tfawserr.ErrMessageContains(err, errCodeInvalidRequest, "Versioning must be 'Enabled' on the bucket") { @@ -356,7 +356,7 @@ func resourceBucketReplicationConfigurationCreate(ctx context.Context, d *schema d.SetId(bucket) - _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (interface{}, 
error) { return findReplicationConfiguration(ctx, conn, d.Id()) }) @@ -434,7 +434,7 @@ func resourceBucketReplicationConfigurationDelete(ctx context.Context, d *schema return sdkdiag.AppendErrorf(diags, "deleting S3 Bucket Replication Configuration (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findReplicationConfiguration(ctx, conn, d.Id()) }) diff --git a/internal/service/s3/bucket_request_payment_configuration.go b/internal/service/s3/bucket_request_payment_configuration.go index f9db85f1e13..1996acdd27e 100644 --- a/internal/service/s3/bucket_request_payment_configuration.go +++ b/internal/service/s3/bucket_request_payment_configuration.go @@ -70,7 +70,7 @@ func resourceBucketRequestPaymentConfigurationCreate(ctx context.Context, d *sch input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (interface{}, error) { return conn.PutBucketRequestPayment(ctx, input) }, errCodeNoSuchBucket) @@ -84,7 +84,7 @@ func resourceBucketRequestPaymentConfigurationCreate(ctx context.Context, d *sch d.SetId(CreateResourceID(bucket, expectedBucketOwner)) - _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findBucketRequestPayment(ctx, conn, bucket, expectedBucketOwner) }) diff --git a/internal/service/s3/bucket_server_side_encryption_configuration.go b/internal/service/s3/bucket_server_side_encryption_configuration.go index 39967ae1857..4ef04a09f2d 100644 --- a/internal/service/s3/bucket_server_side_encryption_configuration.go +++ 
b/internal/service/s3/bucket_server_side_encryption_configuration.go @@ -95,7 +95,7 @@ func resourceBucketServerSideEncryptionConfigurationCreate(ctx context.Context, input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (interface{}, error) { return conn.PutBucketEncryption(ctx, input) }, errCodeNoSuchBucket, errCodeOperationAborted) @@ -109,7 +109,7 @@ func resourceBucketServerSideEncryptionConfigurationCreate(ctx context.Context, d.SetId(CreateResourceID(bucket, expectedBucketOwner)) - _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findServerSideEncryptionConfiguration(ctx, conn, bucket, expectedBucketOwner) }) @@ -167,7 +167,7 @@ func resourceBucketServerSideEncryptionConfigurationUpdate(ctx context.Context, input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (interface{}, error) { return conn.PutBucketEncryption(ctx, input) }, errCodeNoSuchBucket, errCodeOperationAborted) diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index 98e999ad9d0..afefdcd751f 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -2488,7 +2488,7 @@ func testAccCheckBucketDestroyWithProvider(ctx context.Context) acctest.TestChec // S3 seems to be highly eventually consistent. Even if one connection reports that the queue is gone, // another connection may still report it as present. 
- _, err := tfresource.RetryUntilNotFound(ctx, tfs3.S3BucketPropagationTimeout, func() (interface{}, error) { + _, err := tfresource.RetryUntilNotFound(ctx, tfs3.BucketPropagationTimeout, func() (interface{}, error) { return nil, tfs3.FindBucket(ctx, conn, rs.Primary.ID) }) diff --git a/internal/service/s3/bucket_versioning.go b/internal/service/s3/bucket_versioning.go index 211f5304e7a..67734f54a46 100644 --- a/internal/service/s3/bucket_versioning.go +++ b/internal/service/s3/bucket_versioning.go @@ -123,7 +123,7 @@ func resourceBucketVersioningCreate(ctx context.Context, d *schema.ResourceData, input.MFA = aws.String(v.(string)) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (interface{}, error) { return conn.PutBucketVersioning(ctx, input) }, errCodeNoSuchBucket) @@ -343,7 +343,7 @@ func waitForBucketVersioningStatus(ctx context.Context, conn *s3.Client, bucket, Pending: []string{""}, Target: bucketVersioningStatus_Values(), Refresh: statusBucketVersioning(ctx, conn, bucket, expectedBucketOwner), - Timeout: s3BucketPropagationTimeout, + Timeout: bucketPropagationTimeout, ContinuousTargetOccurence: 3, NotFoundChecks: 3, Delay: 1 * time.Second, diff --git a/internal/service/s3/bucket_website_configuration.go b/internal/service/s3/bucket_website_configuration.go index 1b0bc70c56a..53d887d81d6 100644 --- a/internal/service/s3/bucket_website_configuration.go +++ b/internal/service/s3/bucket_website_configuration.go @@ -217,7 +217,7 @@ func resourceBucketWebsiteConfigurationCreate(ctx context.Context, d *schema.Res input.ExpectedBucketOwner = aws.String(expectedBucketOwner) } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, bucketPropagationTimeout, func() (interface{}, error) { return 
conn.PutBucketWebsite(ctx, input) }, errCodeNoSuchBucket) @@ -231,7 +231,7 @@ func resourceBucketWebsiteConfigurationCreate(ctx context.Context, d *schema.Res d.SetId(CreateResourceID(bucket, expectedBucketOwner)) - _, err = tfresource.RetryWhenNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryWhenNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findBucketWebsite(ctx, conn, bucket, expectedBucketOwner) }) @@ -386,7 +386,7 @@ func resourceBucketWebsiteConfigurationDelete(ctx context.Context, d *schema.Res return diag.Errorf("deleting S3 Bucket Website Configuration (%s): %s", d.Id(), err) } - _, err = tfresource.RetryUntilNotFound(ctx, s3BucketPropagationTimeout, func() (interface{}, error) { + _, err = tfresource.RetryUntilNotFound(ctx, bucketPropagationTimeout, func() (interface{}, error) { return findBucketWebsite(ctx, conn, bucket, expectedBucketOwner) }) diff --git a/internal/service/s3/exports_test.go b/internal/service/s3/exports_test.go index 02dc4568ae8..b7eb022e7f7 100644 --- a/internal/service/s3/exports_test.go +++ b/internal/service/s3/exports_test.go @@ -37,10 +37,10 @@ var ( SDKv1CompatibleCleanKey = sdkv1CompatibleCleanKey ValidBucketName = validBucketName + BucketPropagationTimeout = bucketPropagationTimeout ErrCodeBucketAlreadyExists = errCodeBucketAlreadyExists ErrCodeBucketAlreadyOwnedByYou = errCodeBucketAlreadyOwnedByYou ErrCodeNoSuchCORSConfiguration = errCodeNoSuchCORSConfiguration LifecycleRuleStatusDisabled = lifecycleRuleStatusDisabled LifecycleRuleStatusEnabled = lifecycleRuleStatusEnabled - S3BucketPropagationTimeout = s3BucketPropagationTimeout ) From a25bed13d1484fa792c3f24255e03790f52c45b6 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 14:26:10 -0500 Subject: [PATCH 420/438] r/aws_s3_bucket: Fix 'panic: interface conversion: interface {} is types.Permission, not string'. 
--- internal/service/s3/bucket.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 2c46fc7ecef..12855963d10 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -2056,7 +2056,7 @@ func flattenBucketGrants(apiObject *s3.GetBucketAclOutput) []interface{} { } if v, ok := getGrant(results, m); ok { - v.(map[string]interface{})["permissions"].(*schema.Set).Add(apiObject.Permission) + v.(map[string]interface{})["permissions"].(*schema.Set).Add(string(apiObject.Permission)) } else { m["permissions"] = schema.NewSet(schema.HashString, []interface{}{string(apiObject.Permission)}) results = append(results, m) From f19d88ca92aac93909a800cfe8d4366af724f87f Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 14:31:39 -0500 Subject: [PATCH 421/438] Cosmetics. --- internal/service/ec2/verifiedaccess_endpoint.go | 2 +- internal/service/ec2/verifiedaccess_endpoint_test.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/internal/service/ec2/verifiedaccess_endpoint.go b/internal/service/ec2/verifiedaccess_endpoint.go index 6921e4351e0..fe8a9308d68 100644 --- a/internal/service/ec2/verifiedaccess_endpoint.go +++ b/internal/service/ec2/verifiedaccess_endpoint.go @@ -328,8 +328,8 @@ func resourceVerifiedAccessEndpointUpdate(ctx context.Context, d *schema.Resourc if d.HasChange("policy_document") { input := &ec2.ModifyVerifiedAccessEndpointPolicyInput{ PolicyDocument: aws.String(d.Get("policy_document").(string)), - VerifiedAccessEndpointId: aws.String(d.Id()), PolicyEnabled: aws.Bool(true), + VerifiedAccessEndpointId: aws.String(d.Id()), } _, err := conn.ModifyVerifiedAccessEndpointPolicy(ctx, input) diff --git a/internal/service/ec2/verifiedaccess_endpoint_test.go b/internal/service/ec2/verifiedaccess_endpoint_test.go index 8ffdd6e4e80..4d277455532 100644 --- a/internal/service/ec2/verifiedaccess_endpoint_test.go +++ 
b/internal/service/ec2/verifiedaccess_endpoint_test.go @@ -46,6 +46,7 @@ func TestAccVerifiedAccessEndpoint_basic(t *testing.T) { resource.TestCheckResourceAttrSet(resourceName, "domain_certificate_arn"), resource.TestCheckResourceAttr(resourceName, "endpoint_domain_prefix", "example"), resource.TestCheckResourceAttr(resourceName, "endpoint_type", "load-balancer"), + resource.TestCheckResourceAttr(resourceName, "policy_document", ""), resource.TestCheckResourceAttr(resourceName, "sse_specification.0.customer_managed_key_enabled", "false"), resource.TestCheckResourceAttrSet(resourceName, "load_balancer_options.0.load_balancer_arn"), resource.TestCheckResourceAttr(resourceName, "load_balancer_options.0.port", "443"), From 556fc0ec97e597e24ad134ca5d5d84e783cfbc54 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 14:47:39 -0500 Subject: [PATCH 422/438] Update sqs_queue.html.markdown --- website/docs/r/sqs_queue.html.markdown | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/website/docs/r/sqs_queue.html.markdown b/website/docs/r/sqs_queue.html.markdown index 4457f6f6203..fd4303b52e6 100644 --- a/website/docs/r/sqs_queue.html.markdown +++ b/website/docs/r/sqs_queue.html.markdown @@ -54,7 +54,8 @@ resource "aws_sqs_queue" "terraform_queue" { ```terraform resource "aws_sqs_queue" "terraform_queue" { - name = "terraform-example-queue" + name = "terraform-example-queue" + redrive_policy = jsonencode({ deadLetterTargetArn = aws_sqs_queue.terraform_queue_deadletter.arn maxReceiveCount = 4 From 9565c8062bd120c3adbf2e326358e30236d9d946 Mon Sep 17 00:00:00 2001 From: changelogbot Date: Fri, 22 Dec 2023 19:48:30 +0000 Subject: [PATCH 423/438] Update CHANGELOG.md for #35051 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9bb0ec87ae0..07a3c56d976 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,9 +9,11 @@ ENHANCEMENTS: * data-source/aws_batch_compute_environment: Add `update_policy` attribute 
([#34353](https://github.com/hashicorp/terraform-provider-aws/issues/34353)) * data-source/aws_ecr_image: Add `image_uri` attribute ([#24526](https://github.com/hashicorp/terraform-provider-aws/issues/24526)) +* data-source/aws_lambda_function: Add `logging_config` attribute ([#35050](https://github.com/hashicorp/terraform-provider-aws/issues/35050)) * resource/aws_batch_compute_environment: Add `update_policy` parameter ([#34353](https://github.com/hashicorp/terraform-provider-aws/issues/34353)) * resource/aws_dms_replication_task: Allow `cdc_start_time` to use [RFC3339](https://www.rfc-editor.org/rfc/rfc3339) formatted dates in addition to UNIX timestamps ([#31917](https://github.com/hashicorp/terraform-provider-aws/issues/31917)) * resource/aws_dms_replication_task: Remove [ForceNew](https://developer.hashicorp.com/terraform/plugin/sdkv2/schemas/schema-behaviors#forcenew) from `replication_instance_arn`, allowing in-place migration between DMS instances ([#30721](https://github.com/hashicorp/terraform-provider-aws/issues/30721)) +* resource/aws_lambda_function: Add `logging_config` configuration block in support of [advanced logging controls](https://docs.aws.amazon.com/lambda/latest/dg/monitoring-cloudwatchlogs.html#monitoring-cloudwatchlogs-advanced) ([#35050](https://github.com/hashicorp/terraform-provider-aws/issues/35050)) * resource/aws_lambda_function: Add support for `python3.12` `runtime` value ([#35049](https://github.com/hashicorp/terraform-provider-aws/issues/35049)) * resource/aws_lambda_layer_version: Add support for `python3.12` `compatible_runtimes` value ([#35049](https://github.com/hashicorp/terraform-provider-aws/issues/35049)) * resource/aws_s3_bucket: Modify server-side encryption configuration error handling, enabling support for NetApp StorageGRID ([#34890](https://github.com/hashicorp/terraform-provider-aws/issues/34890)) From 875dd10ce46ec07981a57180b18927cf56b2102e Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 
14:49:16 -0500 Subject: [PATCH 424/438] Add 'flex.ExpandString(y)ValueListEmpty'. --- internal/flex/flex.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/internal/flex/flex.go b/internal/flex/flex.go index b5ff60a7cb0..19f812e80c1 100644 --- a/internal/flex/flex.go +++ b/internal/flex/flex.go @@ -76,6 +76,24 @@ func ExpandStringyValueList[E ~string](configured []any) []E { return vs } +// ExpandStringValueList takes the result of flatmap.Expand for an array of strings +// and returns a []string +func ExpandStringValueListEmpty(configured []interface{}) []string { + return ExpandStringyValueListEmpty[string](configured) +} + +func ExpandStringyValueListEmpty[E ~string](configured []any) []E { + vs := make([]E, 0, len(configured)) + for _, v := range configured { + if val, ok := v.(string); ok { // empty string in config turns into nil in []interface{} so !ok + vs = append(vs, E(val)) + } else { + vs = append(vs, E("")) + } + } + return vs +} + // Takes list of pointers to strings. Expand to an array // of raw strings and returns a []interface{} // to keep compatibility w/ schema.NewSetschema.NewSet From 8e1fb9fe33ad16772b90a4792d383c22e5ee6460 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 14:50:06 -0500 Subject: [PATCH 425/438] Use 'flex.ExpandStringValueListEmpty' for S3 Bucket cors_rule. 
--- internal/service/s3/bucket.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 12855963d10..796cbafeac2 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -1222,7 +1222,7 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta inte input := &s3.PutBucketCorsInput{ Bucket: aws.String(d.Id()), CORSConfiguration: &types.CORSConfiguration{ - CORSRules: expandBucketCORSRules(d.Get("cors_rule").(*schema.Set).List()), + CORSRules: expandBucketCORSRules(d.Get("cors_rule").([]interface{})), }, } @@ -1718,19 +1718,19 @@ func expandBucketCORSRules(l []interface{}) []types.CORSRule { rule := types.CORSRule{} if v, ok := tfMap["allowed_headers"].([]interface{}); ok && len(v) > 0 { - rule.AllowedHeaders = flex.ExpandStringValueList(v) + rule.AllowedHeaders = flex.ExpandStringValueListEmpty(v) } if v, ok := tfMap["allowed_methods"].([]interface{}); ok && len(v) > 0 { - rule.AllowedMethods = flex.ExpandStringValueList(v) + rule.AllowedMethods = flex.ExpandStringValueListEmpty(v) } if v, ok := tfMap["allowed_origins"].([]interface{}); ok && len(v) > 0 { - rule.AllowedOrigins = flex.ExpandStringValueList(v) + rule.AllowedOrigins = flex.ExpandStringValueListEmpty(v) } if v, ok := tfMap["expose_headers"].([]interface{}); ok && len(v) > 0 { - rule.ExposeHeaders = flex.ExpandStringValueList(v) + rule.ExposeHeaders = flex.ExpandStringValueListEmpty(v) } if v, ok := tfMap["max_age_seconds"].(int); ok { From 560a707d946d74f1b898034f5215037d37df112a Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 15:09:44 -0500 Subject: [PATCH 426/438] Fix 'TestAccS3Bucket_Replication_expectVersioningValidationError'. 
--- internal/service/s3/bucket_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index afefdcd751f..0892c6a5270 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -1615,7 +1615,7 @@ func TestAccS3Bucket_Replication_expectVersioningValidationError(t *testing.T) { Steps: []resource.TestStep{ { Config: testAccBucketConfig_replicationNoVersioning(bucketName), - ExpectError: regexache.MustCompile(`versioning must be enabled to allow S3 bucket replication`), + ExpectError: regexache.MustCompile(`versioning must be enabled on S3 Bucket \(.*\) to allow replication`), }, }, }) From 13ed607b59fe229441ffe56073e38a86500ca4f2 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 15:14:43 -0500 Subject: [PATCH 427/438] Fix 'flattenBucketDestination'. --- internal/service/s3/bucket.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 796cbafeac2..bfef9416d4d 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -2686,7 +2686,7 @@ func flattenBucketDestination(dest *types.Destination) []interface{} { } if dest.Account != nil { - m["account"] = aws.ToString(dest.Account) + m["account_id"] = aws.ToString(dest.Account) } if dest.Bucket != nil { From 5408f8f50d1713ef76d79f11ba6a032054c8adfb Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 15:33:25 -0500 Subject: [PATCH 428/438] Fix 'expandBucketVersioningConfigurationCreate'. 
--- internal/service/s3/bucket.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index bfef9416d4d..1c149c2819f 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -37,7 +37,6 @@ import ( tfslices "github.com/hashicorp/terraform-provider-aws/internal/slices" tftags "github.com/hashicorp/terraform-provider-aws/internal/tags" "github.com/hashicorp/terraform-provider-aws/internal/tfresource" - itypes "github.com/hashicorp/terraform-provider-aws/internal/types" "github.com/hashicorp/terraform-provider-aws/internal/verify" "github.com/hashicorp/terraform-provider-aws/names" "golang.org/x/exp/slices" @@ -1925,7 +1924,7 @@ func expandBucketVersioningConfigurationCreate(l []interface{}) *types.Versionin apiObject.MFADelete = types.MFADeleteEnabled } - if itypes.IsZero(&apiObject) { + if apiObject.MFADelete == "" && apiObject.Status == "" { return nil } From 8183adfb0336a147bb18246469db88d3ebc2d8f9 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 15:40:44 -0500 Subject: [PATCH 429/438] Fix 'missing required field, PutBucketVersioningInput.VersioningConfiguration'. 
--- internal/service/s3/bucket.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/internal/service/s3/bucket.go b/internal/service/s3/bucket.go index 1c149c2819f..4773a905059 100644 --- a/internal/service/s3/bucket.go +++ b/internal/service/s3/bucket.go @@ -1283,17 +1283,19 @@ func resourceBucketUpdate(ctx context.Context, d *schema.ResourceData, meta inte versioningConfig = expandBucketVersioningConfigurationUpdate(v) } - input := &s3.PutBucketVersioningInput{ - Bucket: aws.String(d.Id()), - VersioningConfiguration: versioningConfig, - } + if versioningConfig != nil { + input := &s3.PutBucketVersioningInput{ + Bucket: aws.String(d.Id()), + VersioningConfiguration: versioningConfig, + } - _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { - return conn.PutBucketVersioning(ctx, input) - }, errCodeNoSuchBucket) + _, err := tfresource.RetryWhenAWSErrCodeEquals(ctx, d.Timeout(schema.TimeoutUpdate), func() (interface{}, error) { + return conn.PutBucketVersioning(ctx, input) + }, errCodeNoSuchBucket) - if err != nil { - return sdkdiag.AppendErrorf(diags, "putting S3 Bucket (%s) versioning: %s", d.Id(), err) + if err != nil { + return sdkdiag.AppendErrorf(diags, "putting S3 Bucket (%s) versioning: %s", d.Id(), err) + } } } From 47242cfc4de06be6349af163eb331267836a1e51 Mon Sep 17 00:00:00 2001 From: Kit Ewbank Date: Fri, 22 Dec 2023 16:44:10 -0500 Subject: [PATCH 430/438] Use 'aws-sdk-go-base/v2/tfawserr', not 'aws-sdk-go-base/v2/awsv1shim/v2/tfawserr'. 
--- internal/service/s3/bucket_test.go | 2 +- internal/service/s3/service_package.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/s3/bucket_test.go b/internal/service/s3/bucket_test.go index 0892c6a5270..72da945e79d 100644 --- a/internal/service/s3/bucket_test.go +++ b/internal/service/s3/bucket_test.go @@ -19,7 +19,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/s3/types" "github.com/aws/aws-sdk-go/service/cloudformation" "github.com/aws/aws-sdk-go/service/cloudfront" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/id" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" diff --git a/internal/service/s3/service_package.go b/internal/service/s3/service_package.go index 43ef06b8ac5..44376534a4f 100644 --- a/internal/service/s3/service_package.go +++ b/internal/service/s3/service_package.go @@ -9,7 +9,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/aws/retry" "github.com/aws/aws-sdk-go-v2/service/s3" - "github.com/hashicorp/aws-sdk-go-base/v2/awsv1shim/v2/tfawserr" + "github.com/hashicorp/aws-sdk-go-base/v2/tfawserr" "github.com/hashicorp/terraform-provider-aws/internal/conns" "github.com/hashicorp/terraform-provider-aws/names" ) From 5deca6413355b80d737a33f646223798dc0af2cc Mon Sep 17 00:00:00 2001 From: changelogbot Date: Fri, 22 Dec 2023 21:51:30 +0000 Subject: [PATCH 431/438] Update CHANGELOG.md for #35035 --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 07a3c56d976..e0782598725 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,7 +16,9 @@ ENHANCEMENTS: * resource/aws_lambda_function: Add `logging_config` configuration block in support of [advanced logging 
controls](https://docs.aws.amazon.com/lambda/latest/dg/monitoring-cloudwatchlogs.html#monitoring-cloudwatchlogs-advanced) ([#35050](https://github.com/hashicorp/terraform-provider-aws/issues/35050)) * resource/aws_lambda_function: Add support for `python3.12` `runtime` value ([#35049](https://github.com/hashicorp/terraform-provider-aws/issues/35049)) * resource/aws_lambda_layer_version: Add support for `python3.12` `compatible_runtimes` value ([#35049](https://github.com/hashicorp/terraform-provider-aws/issues/35049)) +* resource/aws_s3_bucket: Modify resource Read to support third-party S3 API implementations. Because we cannot easily test this functionality, it is best effort and we ask for community help in testing ([#35035](https://github.com/hashicorp/terraform-provider-aws/issues/35035)) * resource/aws_s3_bucket: Modify server-side encryption configuration error handling, enabling support for NetApp StorageGRID ([#34890](https://github.com/hashicorp/terraform-provider-aws/issues/34890)) +* resource/aws_verifiedaccess_endpoint: Add `policy_document` argument ([#34264](https://github.com/hashicorp/terraform-provider-aws/issues/34264)) BUG FIXES: From 9f159931367b97071815e5d9c8083cfb92e5e1c2 Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Wed, 27 Dec 2023 12:21:05 -0600 Subject: [PATCH 432/438] aws_networkfirewall_firewall: remove timeout specific test --- .../service/networkfirewall/firewall_test.go | 68 ------------------- 1 file changed, 68 deletions(-) diff --git a/internal/service/networkfirewall/firewall_test.go b/internal/service/networkfirewall/firewall_test.go index b22d42bf8d4..f8081b940b2 100644 --- a/internal/service/networkfirewall/firewall_test.go +++ b/internal/service/networkfirewall/firewall_test.go @@ -402,55 +402,6 @@ func TestAccNetworkFirewallFirewall_tags(t *testing.T) { }) } -func TestAccNetworkFirewallFirewall_timeout(t *testing.T) { - ctx := acctest.Context(t) - rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) - 
resourceName := "aws_networkfirewall_firewall.test" - policyResourceName := "aws_networkfirewall_firewall_policy.test" - subnetResourceName := "aws_subnet.test.0" - vpcResourceName := "aws_vpc.test" - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(ctx, t); testAccPreCheck(ctx, t) }, - ErrorCheck: acctest.ErrorCheck(t, networkfirewall.EndpointsID), - ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, - CheckDestroy: testAccCheckFirewallDestroy(ctx), - Steps: []resource.TestStep{ - { - Config: testAccFirewallConfig_timeout(rName), - Check: resource.ComposeTestCheckFunc( - testAccCheckFirewallExists(ctx, resourceName), - acctest.CheckResourceAttrRegionalARN(resourceName, "arn", "network-firewall", fmt.Sprintf("firewall/%s", rName)), - resource.TestCheckResourceAttr(resourceName, "delete_protection", "false"), - resource.TestCheckResourceAttr(resourceName, "description", ""), - resource.TestCheckResourceAttrPair(resourceName, "firewall_policy_arn", policyResourceName, "arn"), - resource.TestCheckResourceAttr(resourceName, "firewall_status.#", "1"), - resource.TestCheckResourceAttr(resourceName, "firewall_status.0.sync_states.#", "1"), - resource.TestCheckTypeSetElemAttrPair(resourceName, "firewall_status.0.sync_states.*.availability_zone", subnetResourceName, "availability_zone"), - resource.TestMatchTypeSetElemNestedAttrs(resourceName, "firewall_status.0.sync_states.*", map[string]*regexp.Regexp{ - "attachment.0.endpoint_id": regexache.MustCompile(`vpce-`), - }), - resource.TestCheckTypeSetElemAttrPair(resourceName, "firewall_status.0.sync_states.*.attachment.0.subnet_id", subnetResourceName, "id"), - resource.TestCheckResourceAttr(resourceName, "name", rName), - resource.TestCheckResourceAttrPair(resourceName, "vpc_id", vpcResourceName, "id"), - resource.TestCheckResourceAttr(resourceName, "subnet_mapping.#", "1"), - resource.TestCheckTypeSetElemAttrPair(resourceName, "subnet_mapping.*.subnet_id", subnetResourceName, 
"id"), - resource.TestCheckTypeSetElemNestedAttrs(resourceName, "subnet_mapping.*", map[string]string{ - "ip_address_type": networkfirewall.IPAddressTypeIpv4, - }), - resource.TestCheckResourceAttr(resourceName, "tags.%", "0"), - resource.TestCheckResourceAttrSet(resourceName, "update_token"), - ), - }, - { - ResourceName: resourceName, - ImportState: true, - ImportStateVerify: true, - }, - }, - }) -} - func TestAccNetworkFirewallFirewall_disappears(t *testing.T) { ctx := acctest.Context(t) rName := sdkacctest.RandomWithPrefix(acctest.ResourcePrefix) @@ -738,22 +689,3 @@ resource "aws_networkfirewall_firewall" "test" { } `, rName)) } - -func testAccFirewallConfig_timeout(rName string) string { - return acctest.ConfigCompose(testAccFirewallConfig_base(rName), fmt.Sprintf(` -resource "aws_networkfirewall_firewall" "test" { - name = %[1]q - firewall_policy_arn = aws_networkfirewall_firewall_policy.test.arn - vpc_id = aws_vpc.test.id - - subnet_mapping { - subnet_id = aws_subnet.test[0].id - } - - timeouts { - create = "50m" - delete = "50m" - } -} -`, rName)) -} From 085d9099b106d7a38ed8e17c8ddb434b2c97c08e Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Wed, 27 Dec 2023 12:24:05 -0600 Subject: [PATCH 433/438] fmt tests --- internal/service/networkfirewall/firewall_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/service/networkfirewall/firewall_test.go b/internal/service/networkfirewall/firewall_test.go index f8081b940b2..23b10a8942c 100644 --- a/internal/service/networkfirewall/firewall_test.go +++ b/internal/service/networkfirewall/firewall_test.go @@ -602,7 +602,7 @@ resource "aws_networkfirewall_firewall" "test" { } timeouts { - update = "1h" + update = "1h" } } `, rName)) @@ -638,7 +638,7 @@ resource "aws_networkfirewall_firewall" "test" { } timeouts { - update = "1h" + update = "1h" } } `, rName)) From f0adde8ac03f1a5cec0dc57db15e809638289232 Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Wed, 27 Dec 2023 14:14:41 
-0600 Subject: [PATCH 434/438] aws_kms_key: use RequiredWith --- internal/service/kms/key.go | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/internal/service/kms/key.go b/internal/service/kms/key.go index 0bee72250c8..648dc6bfaff 100644 --- a/internal/service/kms/key.go +++ b/internal/service/kms/key.go @@ -62,12 +62,6 @@ func ResourceKey() *schema.Resource { ForceNew: true, ValidateFunc: validation.StringLenBetween(1, 22), }, - "xks_key_id": { - Type: schema.TypeString, - Optional: true, - ForceNew: true, - ValidateFunc: validation.StringLenBetween(1, 128), - }, "customer_master_key_spec": { Type: schema.TypeString, Optional: true, @@ -127,6 +121,13 @@ func ResourceKey() *schema.Resource { }, names.AttrTags: tftags.TagsSchema(), names.AttrTagsAll: tftags.TagsSchemaComputed(), + "xks_key_id": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + RequiredWith: []string{"custom_key_store_id"}, + ValidateFunc: validation.StringLenBetween(1, 128), + }, }, } } @@ -159,17 +160,14 @@ func resourceKeyCreate(ctx context.Context, d *schema.ResourceData, meta interfa input.Policy = aws.String(v.(string)) } - if v, ok := d.GetOk("xks_key_id"); ok { - if _, ok := d.GetOk("custom_key_store_id"); !ok { - return sdkdiag.AppendErrorf(diags, "custom_key_store_id must be set when xks_key_id is set") - } + if v, ok := d.GetOk("custom_key_store_id"); ok { + input.Origin = aws.String(kms.OriginTypeAwsCloudhsm) + input.CustomKeyStoreId = aws.String(v.(string)) + } + if v, ok := d.GetOk("xks_key_id"); ok { input.Origin = aws.String(kms.OriginTypeExternalKeyStore) - input.CustomKeyStoreId = aws.String(d.Get("custom_key_store_id").(string)) input.XksKeyId = aws.String(v.(string)) - } else if v, ok := d.GetOk("custom_key_store_id"); ok { - input.Origin = aws.String(kms.OriginTypeAwsCloudhsm) - input.CustomKeyStoreId = aws.String(v.(string)) } // AWS requires any principal in the policy to exist before the key is created. 
From 69cc0a9c86e66325378dffa7619b5f3c189cebae Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Wed, 27 Dec 2023 14:47:01 -0600 Subject: [PATCH 435/438] add CHANGELOG entry --- .changelog/31216.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .changelog/31216.txt diff --git a/.changelog/31216.txt b/.changelog/31216.txt new file mode 100644 index 00000000000..768a57ec890 --- /dev/null +++ b/.changelog/31216.txt @@ -0,0 +1,3 @@ +```release-note:enhancement +resource/aws_kms_key: Add `xks_key_id` attribute +``` \ No newline at end of file From eb7c806b4b71d5595e893e477c8d6206e9af4853 Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Wed, 27 Dec 2023 14:50:52 -0600 Subject: [PATCH 436/438] aws_kms_key: update documentation --- website/docs/r/kms_key.html.markdown | 1 + 1 file changed, 1 insertion(+) diff --git a/website/docs/r/kms_key.html.markdown b/website/docs/r/kms_key.html.markdown index 303451e7776..a6dc528031d 100644 --- a/website/docs/r/kms_key.html.markdown +++ b/website/docs/r/kms_key.html.markdown @@ -48,6 +48,7 @@ If the KMS key is a multi-Region primary key with replicas, the waiting period b * `enable_key_rotation` - (Optional) Specifies whether [key rotation](http://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html) is enabled. Defaults to `false`. * `multi_region` - (Optional) Indicates whether the KMS key is a multi-Region (`true`) or regional (`false`) key. Defaults to `false`. * `tags` - (Optional) A map of tags to assign to the object. If configured with a provider [`default_tags` configuration block](https://registry.terraform.io/providers/hashicorp/aws/latest/docs#default_tags-configuration-block) present, tags with matching keys will overwrite those defined at the provider-level. +* `xks_key_id` - (Optional) Identifies the external key that serves as key material for the KMS key in an external key store. 
## Attribute Reference From 1633925088bca1c4bd32ae389607b38417c494f2 Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Thu, 28 Dec 2023 15:20:00 -0600 Subject: [PATCH 437/438] aws_glue_classifier: use Values() function --- internal/service/glue/classifier.go | 20 ++++---------------- 1 file changed, 4 insertions(+), 16 deletions(-) diff --git a/internal/service/glue/classifier.go b/internal/service/glue/classifier.go index eb88ad45409..002846c9da7 100644 --- a/internal/service/glue/classifier.go +++ b/internal/service/glue/classifier.go @@ -122,16 +122,10 @@ func ResourceClassifier() *schema.Resource { Optional: true, }, "serde": { - Type: schema.TypeString, - Optional: true, - // Computed is required because if nothing is set, the API - // will return "" which will be translated to "None" - Computed: true, - ValidateFunc: validation.StringInSlice([]string{ - "OpenCSVSerDe", - "LazySimpleSerDe", - "None", - }, false), + Type: schema.TypeString, + Optional: true, + Computed: true, + ValidateFunc: validation.StringInSlice(glue.CsvSerdeOption_Values(), false), }, }, }, @@ -489,12 +483,6 @@ func flattenCSVClassifier(csvClassifier *glue.CsvClassifier) []map[string]interf "serde": aws.StringValue(csvClassifier.Serde), } - // When setting the value of `serde` to "None", it comes back as "" within the API - // This needs to be translated from "" or the validation will fail. 
- if m["serde"].(string) == "" { - m["serde"] = "None" - } - return []map[string]interface{}{m} } From 1966e343315773ea1aaa1734c24361177c4abf61 Mon Sep 17 00:00:00 2001 From: Adrian Johnson Date: Thu, 28 Dec 2023 15:26:07 -0600 Subject: [PATCH 438/438] aws_glue_classifier: test fmt --- internal/service/glue/classifier_test.go | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/internal/service/glue/classifier_test.go b/internal/service/glue/classifier_test.go index 1e2d9a4b8f5..8e5533f5926 100644 --- a/internal/service/glue/classifier_test.go +++ b/internal/service/glue/classifier_test.go @@ -74,7 +74,6 @@ func TestAccGlueClassifier_csvClassifier(t *testing.T) { }) } -// Test to ensure the Serde value is set properly in a CsvClassifier block func TestAccGlueClassifier_csvClassifierCustomSerde(t *testing.T) { ctx := acctest.Context(t) var classifier glue.Classifier @@ -88,7 +87,6 @@ func TestAccGlueClassifier_csvClassifierCustomSerde(t *testing.T) { ProtoV5ProviderFactories: acctest.ProtoV5ProviderFactories, CheckDestroy: testAccCheckClassifierDestroy(ctx), Steps: []resource.TestStep{ - // Set the serde to the default value (None) { Config: testAccClassifierConfig_csvWithSerde(rName, false, "PRESENT", "|", false, "None"), Check: resource.ComposeTestCheckFunc( @@ -98,7 +96,6 @@ func TestAccGlueClassifier_csvClassifierCustomSerde(t *testing.T) { resource.TestCheckResourceAttr(resourceName, "name", rName), ), }, - // Update the serde to a non-default value (OpenCSVSerDe) { Config: testAccClassifierConfig_csvWithSerde(rName, false, "PRESENT", ",", false, "OpenCSVSerDe"), Check: resource.ComposeTestCheckFunc( @@ -555,14 +552,14 @@ resource "aws_glue_classifier" "test" { func testAccClassifierConfig_csvWithSerde(rName string, allowSingleColumn bool, containsHeader string, delimiter string, disableValueTrimming bool, serde string) string { return fmt.Sprintf(` resource "aws_glue_classifier" "test" { - name = "%s" + name = %[1]q csv_classifier 
{ - allow_single_column = "%t" - contains_header = "%s" - delimiter = "%s" - disable_value_trimming = "%t" - serde = "%s" + allow_single_column = %[2]t + contains_header = %[3]q + delimiter = %[4]q + disable_value_trimming = %[5]t + serde = %[6]q header = ["header_column1", "header_column2"] } }